Mirror of https://github.com/slackhq/nebula.git (synced 2025-11-22 16:34:25 +01:00)

Compare commits: 150 commits
| Commit (SHA1) |
|---|
| ea36949d8a |
| 0564d0a2cf |
| b22ba6eb49 |
| 3a221812f6 |
| 927ff4cc03 |
| e5945a60aa |
| 072edd56b3 |
| beb5f6bddc |
| 8be9792059 |
| af2fc48378 |
| 1d2f95e718 |
| 3a8743d511 |
| 0209402942 |
| fb55f5b762 |
| 01cddb8013 |
| 1083279a45 |
| fe16ea566d |
| 3356e03d85 |
| f41db52560 |
| 5181cb0474 |
| a44e1b8b05 |
| 276978377a |
| 777eb96aea |
| 0912ef14f4 |
| 77a8ce1712 |
| 87b628ba24 |
| 50d6a1e8ca |
| e78fe0b9ef |
| 5fccbb8676 |
| c289c7a7ca |
| e3fbfbfd4d |
| 282ca4368e |
| 280fa026ea |
| dbdb48f182 |
| f7e392995a |
| d271df8da8 |
| eea5e6a5df |
| 790268a176 |
| 06b480e177 |
| 076ebc6c6e |
| 7edcf620c0 |
| 5a131b2975 |
| 223cc6e660 |
| 5671c6607c |
| 7ecafbe61d |
| 546eb3bfbc |
| 7364d99e34 |
| 83b6dc7b16 |
| 3d0da7c859 |
| ed00f5d530 |
| 38e56a4858 |
| fce93ccb54 |
| 0d715effbc |
| 0c003b64f1 |
| 14d0106716 |
| 959b015b3b |
| 0bffa76b5e |
| 03e70210a5 |
| 9c6592b159 |
| e5af94e27a |
| 96f51f78ea |
| a10baeee92 |
| 52c9e360e7 |
| 8caaff7109 |
| 1e3c155896 |
| f5db03c834 |
| c5ce945852 |
| 7e380bde7e |
| a3e59a38ef |
| 8ba5d64dbc |
| 3bbf5f4e67 |
| 928731acfe |
| 57eb80e9fb |
| 96f4dcaab8 |
| 6d8c5f437c |
| 165b671e70 |
| 6be0bad68a |
| 7ae3cd25f8 |
| 9a7ed57a3f |
| eb9f22a8fa |
| 54a8499c7b |
| 419aaf2e36 |
| 1701087035 |
| a9cb2e06f4 |
| 115b4b70b1 |
| 0707caedb4 |
| bd9cc01d62 |
| d1f786419c |
| 31ed9269d7 |
| 48eb63899f |
| b26c13336f |
| e0185c4b01 |
| 702e1c59bd |
| 5fe8f45d05 |
| 03e4a7f988 |
| 0b67b19771 |
| a0d3b93ae5 |
| 58ec1f7a7b |
| 397fe5f879 |
| 9b03053191 |
| 3cb4e0ef57 |
| e0553822b0 |
| d3fe3efcb0 |
| fd99ce9a71 |
| 6685856b5d |
| a56a97e5c3 |
| ee8e1348e9 |
| 1a6c657451 |
| 6b3d42efa5 |
| 2801fb2286 |
| e28336c5db |
| 3e5c7e6860 |
| 8a82e0fb16 |
| f0ef80500d |
| 61b784d2bb |
| 5da79e2a4c |
| e1af37e46d |
| 6e0ae4f9a3 |
| f0ac61c1f0 |
| 92cc32f844 |
| 2ea360e5e2 |
| 469ae78748 |
| a06977bbd5 |
| 5bd8712946 |
| 0fc4d8192f |
| 5278b6f926 |
| c177126ed0 |
| c44da3abee |
| b7e73da943 |
| ff54bfd9f3 |
| b5a85a6eb8 |
| 3ae242fa5f |
| cb2ec861ea |
| a3e6edf9c7 |
| ad7222509d |
| 12dbbd3dd3 |
| ec48298fe8 |
| 77769de1e6 |
| 022ae83a4a |
| d4f9500ca5 |
| 9a8892c526 |
| 813b64ffb1 |
| 85f5849d0b |
| 9af242dc47 |
| a800a48857 |
| 4c0ae3df5e |
| feb3e1317f |
| c2259f14a7 |
| b1eeb5f3b8 |
| 2adf0ca1d1 |
.github/ISSUE_TEMPLATE/bug-report.yml (vendored, new file, 57 lines)

```yaml
name: "\U0001F41B Bug Report"
description: Report an issue or possible bug
title: "\U0001F41B BUG:"
labels: []
assignees: []
body:
  - type: markdown
    attributes:
      value: |
        ### Thank you for taking the time to file a bug report!

        Please fill out this form as completely as possible.

  - type: input
    id: version
    attributes:
      label: What version of `nebula` are you using?
      placeholder: 0.0.0
    validations:
      required: true

  - type: input
    id: os
    attributes:
      label: What operating system are you using?
      description: iOS and Android specific issues belong in the [mobile_nebula](https://github.com/DefinedNet/mobile_nebula) repo.
      placeholder: Linux, Mac, Windows
    validations:
      required: true

  - type: textarea
    id: description
    attributes:
      label: Describe the Bug
      description: A clear and concise description of what the bug is.
    validations:
      required: true

  - type: textarea
    id: logs
    attributes:
      label: Logs from affected hosts
      description: |
        Provide logs from all affected hosts during the time of the issue.
        Improve formatting by using <code>```</code> at the beginning and end of each log block.
    validations:
      required: false

  - type: textarea
    id: configs
    attributes:
      label: Config files from affected hosts
      description: |
        Provide config files for all affected hosts.
        Improve formatting by using <code>```</code> at the beginning and end of each config file.
    validations:
      required: false
```

.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 13 lines)

```yaml
blank_issues_enabled: true
contact_links:
  - name: 📘 Documentation
    url: https://nebula.defined.net/docs/
    about: Review documentation.

  - name: 💁 Support/Chat
    url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
    about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'

  - name: 📱 Mobile Nebula
    url: https://github.com/definednet/mobile_nebula
    about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'
```

.github/dependabot.yml (vendored, new file, 22 lines)

```yaml
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    groups:
      golang-x-dependencies:
        patterns:
          - "golang.org/x/*"
      zx2c4-dependencies:
        patterns:
          - "golang.zx2c4.com/*"
      protobuf-dependencies:
        patterns:
          - "github.com/golang/protobuf"
          - "google.golang.org/protobuf"
```

.github/workflows/gofmt.yml (vendored, 24 changed lines)

```diff
@@ -14,31 +14,21 @@ jobs:
     runs-on: ubuntu-latest
     steps:
 
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-        id: go
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-gofmt1.18-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-gofmt1.18-
+          go-version-file: 'go.mod'
+          check-latest: true
 
       - name: Install goimports
         run: |
-          go get golang.org/x/tools/cmd/goimports
-          go build golang.org/x/tools/cmd/goimports
+          go install golang.org/x/tools/cmd/goimports@latest
 
       - name: gofmt
         run: |
-          if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -l)" ]
+          if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -l)" ]
           then
-            find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -d
+            find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -d
             exit 1
           fi
```

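The job fails whenever goimports would reformat any non-generated Go file. A rough way to run the same check locally, assuming Go is installed and `$(go env GOPATH)/bin` is on `PATH`:

```sh
# Install goimports, then list offending files and show the diff it would apply.
go install golang.org/x/tools/cmd/goimports@latest
find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -l
find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -d
```
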
.github/workflows/release.yml (vendored, 251 changed lines)

```diff
@@ -7,25 +7,24 @@ name: Create release and upload binaries
 
 jobs:
   build-linux:
-    name: Build Linux All
+    name: Build Linux/BSD All
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
 
       - name: Build
         run: |
-          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
+          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd release-openbsd release-netbsd
           mkdir release
           mv build/*.tar.gz release
 
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: linux-latest
           path: release
@@ -34,13 +33,12 @@ jobs:
     name: Build Windows
     runs-on: windows-latest
     steps:
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
 
       - name: Build
         run: |
@@ -57,7 +55,7 @@ jobs:
           mv dist\windows\wintun build\dist\windows\
 
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: windows-latest
           path: build
@@ -68,17 +66,16 @@ jobs:
       HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
     runs-on: macos-11
     steps:
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
 
       - name: Import certificates
         if: env.HAS_SIGNING_CREDS == 'true'
-        uses: Apple-Actions/import-codesign-certs@v1
+        uses: Apple-Actions/import-codesign-certs@v2
         with:
           p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
           p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
@@ -107,7 +104,7 @@ jobs:
           fi
 
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: darwin-latest
           path: ./release/*
@@ -117,12 +114,16 @@ jobs:
     needs: [build-linux, build-darwin, build-windows]
     runs-on: ubuntu-latest
     steps:
+      - uses: actions/checkout@v4
+
       - name: Download artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
+        with:
+          path: artifacts
 
       - name: Zip Windows
         run: |
-          cd windows-latest
+          cd artifacts/windows-latest
           cp windows-amd64/* .
           zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
           cp windows-arm64/* .
@@ -130,6 +131,7 @@ jobs:
 
       - name: Create sha256sum
         run: |
+          cd artifacts
           for dir in linux-latest darwin-latest windows-latest
           do
             (
@@ -159,195 +161,12 @@ jobs:
 
       - name: Create Release
         id: create_release
-        uses: actions/create-release@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ github.ref }}
-          release_name: Release ${{ github.ref }}
-          draft: false
-          prerelease: false
-
-      ##
-      ## Upload assets (I wish we could just upload the whole folder at once...
-      ##
-
-      - name: Upload SHASUM256.txt
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./SHASUM256.txt
-          asset_name: SHASUM256.txt
-          asset_content_type: text/plain
-
-      - name: Upload darwin zip
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./darwin-latest/nebula-darwin.zip
-          asset_name: nebula-darwin.zip
-          asset_content_type: application/zip
-
-      - name: Upload windows-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./windows-latest/nebula-windows-amd64.zip
-          asset_name: nebula-windows-amd64.zip
-          asset_content_type: application/zip
-
-      - name: Upload windows-arm64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./windows-latest/nebula-windows-arm64.zip
-          asset_name: nebula-windows-arm64.zip
-          asset_content_type: application/zip
-
-      - name: Upload linux-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-amd64.tar.gz
-          asset_name: nebula-linux-amd64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-386
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-386.tar.gz
-          asset_name: nebula-linux-386.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-ppc64le
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-ppc64le.tar.gz
-          asset_name: nebula-linux-ppc64le.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-5
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-5.tar.gz
-          asset_name: nebula-linux-arm-5.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-6
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-6.tar.gz
-          asset_name: nebula-linux-arm-6.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-7
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-7.tar.gz
-          asset_name: nebula-linux-arm-7.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm64.tar.gz
-          asset_name: nebula-linux-arm64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips.tar.gz
-          asset_name: nebula-linux-mips.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mipsle
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mipsle.tar.gz
-          asset_name: nebula-linux-mipsle.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips64.tar.gz
-          asset_name: nebula-linux-mips64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips64le
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
-          asset_name: nebula-linux-mips64le.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips-softfloat
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
-          asset_name: nebula-linux-mips-softfloat.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-riscv64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
-          asset_name: nebula-linux-riscv64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload freebsd-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
-          asset_name: nebula-freebsd-amd64.tar.gz
-          asset_content_type: application/gzip
+        run: |
+          cd artifacts
+          gh release create \
+            --verify-tag \
+            --title "Release ${{ github.ref_name }}" \
+            "${{ github.ref_name }}" \
+            SHASUM256.txt *-latest/*.zip *-latest/*.tar.gz
```

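The release job now shells out to the GitHub CLI rather than chaining one upload step per asset. A rough local equivalent of that final step, assuming `gh` is authenticated against the repository and the tag already exists (the tag name below is just an example):

```sh
# Create the release from an existing tag and attach every artifact in one call.
gh release create "v1.8.2" \
  --verify-tag \
  --title "Release v1.8.2" \
  SHASUM256.txt *-latest/*.zip *-latest/*.tar.gz
```
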
.github/workflows/smoke.yml (vendored, 27 changed lines)

```diff
@@ -18,24 +18,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
 
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-        id: go
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.18-
+          go-version-file: 'go.mod'
+          check-latest: true
 
       - name: build
-        run: make bin-docker
+        run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race
 
       - name: setup docker image
         working-directory: ./.github/workflows/smoke
@@ -53,4 +44,12 @@ jobs:
         working-directory: ./.github/workflows/smoke
         run: ./smoke-relay.sh
 
+      - name: setup docker image for P256
+        working-directory: ./.github/workflows/smoke
+        run: NAME="smoke-p256" CURVE=P256 ./build.sh
+
+      - name: run smoke-p256
+        working-directory: ./.github/workflows/smoke
+        run: NAME="smoke-p256" ./smoke.sh
+
     timeout-minutes: 10
```

.github/workflows/smoke/Dockerfile (vendored, 4 changed lines)

```diff
@@ -1,4 +1,6 @@
-FROM debian:buster
+FROM ubuntu:jammy
+
+RUN apt-get update && apt-get install -y iputils-ping ncat tcpdump
 
 ADD ./build /nebula
```

.github/workflows/smoke/build-relay.sh (vendored, 2 changed lines)

```diff
@@ -41,4 +41,4 @@ EOF
  ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )
 
-sudo docker build -t nebula:smoke-relay .
+docker build -t nebula:smoke-relay .
```

.github/workflows/smoke/build.sh (vendored, 4 changed lines)

```diff
@@ -29,11 +29,11 @@ mkdir ./build
   OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
   ../genconfig.sh >host4.yml
 
- ../../../../nebula-cert ca -name "Smoke Test"
+ ../../../../nebula-cert ca -curve "${CURVE:-25519}" -name "Smoke Test"
  ../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
  ../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
  ../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
  ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )
 
-sudo docker build -t nebula:smoke .
+docker build -t "nebula:${NAME:-smoke}" .
```

.github/workflows/smoke/genconfig.sh (vendored, 2 changed lines)

```diff
@@ -50,6 +50,8 @@ tun:
   dev: ${TUN_DEV:-nebula1}
 
 firewall:
+  inbound_action: reject
+  outbound_action: reject
   outbound: ${OUTBOUND:-$FIREWALL_ALL}
   inbound: ${INBOUND:-$FIREWALL_ALL}
 
```

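The two keys added to the generated config make the firewall reject disallowed traffic instead of silently dropping it. A minimal sketch of how that section might look in a standalone Nebula host config, assuming the same rule shape the smoke tests pass in through `INBOUND`/`OUTBOUND`:

```yaml
firewall:
  # Send REJECT replies rather than the default DROP behavior.
  inbound_action: reject
  outbound_action: reject
  outbound:
    - port: any
      proto: any
      host: any
  inbound:
    # Only allow ICMP in, and only from hosts carrying the "lighthouse" group.
    - port: any
      proto: icmp
      group: lighthouse
```
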
.github/workflows/smoke/smoke-relay.sh (vendored, 50 changed lines)

```diff
@@ -14,24 +14,24 @@ cleanup() {
     set +e
     if [ "$(jobs -r)" ]
     then
-        sudo docker kill lighthouse1 host2 host3 host4
+        docker kill lighthouse1 host2 host3 host4
     fi
 }
 
 trap cleanup EXIT
 
-sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
-sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
-sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
-sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
+docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
+docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
+docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
+docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
 
-sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
-sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
+docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
 sleep 1
-sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
+docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
 sleep 1
 
 set +x
@@ -39,43 +39,43 @@ echo
 echo " *** Testing ping from lighthouse1"
 echo
 set -x
-sudo docker exec lighthouse1 ping -c1 192.168.100.2
-sudo docker exec lighthouse1 ping -c1 192.168.100.3
-sudo docker exec lighthouse1 ping -c1 192.168.100.4
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3
+docker exec lighthouse1 ping -c1 192.168.100.4
 
 set +x
 echo
 echo " *** Testing ping from host2"
 echo
 set -x
-sudo docker exec host2 ping -c1 192.168.100.1
+docker exec host2 ping -c1 192.168.100.1
 # Should fail because no relay configured in this direction
-! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
-! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
 
 set +x
 echo
 echo " *** Testing ping from host3"
 echo
 set -x
-sudo docker exec host3 ping -c1 192.168.100.1
-sudo docker exec host3 ping -c1 192.168.100.2
-sudo docker exec host3 ping -c1 192.168.100.4
+docker exec host3 ping -c1 192.168.100.1
+docker exec host3 ping -c1 192.168.100.2
+docker exec host3 ping -c1 192.168.100.4
 
 set +x
 echo
 echo " *** Testing ping from host4"
 echo
 set -x
-sudo docker exec host4 ping -c1 192.168.100.1
+docker exec host4 ping -c1 192.168.100.1
 # Should fail because relays not allowed
-! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
-sudo docker exec host4 ping -c1 192.168.100.3
+! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
+docker exec host4 ping -c1 192.168.100.3
 
-sudo docker exec host4 sh -c 'kill 1'
-sudo docker exec host3 sh -c 'kill 1'
-sudo docker exec host2 sh -c 'kill 1'
-sudo docker exec lighthouse1 sh -c 'kill 1'
+docker exec host4 sh -c 'kill 1'
+docker exec host3 sh -c 'kill 1'
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
 sleep 1
 
 if [ "$(jobs -r)" ]
```

.github/workflows/smoke/smoke.sh (vendored, 95 changed lines)

```diff
@@ -14,60 +14,105 @@ cleanup() {
     set +e
     if [ "$(jobs -r)" ]
     then
-        sudo docker kill lighthouse1 host2 host3 host4
+        docker kill lighthouse1 host2 host3 host4
     fi
 }
 
 trap cleanup EXIT
 
-sudo docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
-sudo docker run --name host2 --rm nebula:smoke -config host2.yml -test
-sudo docker run --name host3 --rm nebula:smoke -config host3.yml -test
-sudo docker run --name host4 --rm nebula:smoke -config host4.yml -test
-
-sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+CONTAINER="nebula:${NAME:-smoke}"
+
+docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
+docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
+docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
+docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
+
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
-sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
+docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
 sleep 1
-sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
+docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
 sleep 1
 
+# grab tcpdump pcaps for debugging
+docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
+docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
+docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
+docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
+docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
+docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
+docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
+docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
+
+docker exec host2 ncat -nklv 0.0.0.0 2000 &
+docker exec host3 ncat -nklv 0.0.0.0 2000 &
+docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
+docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
+
 set +x
 echo
 echo " *** Testing ping from lighthouse1"
 echo
 set -x
-sudo docker exec lighthouse1 ping -c1 192.168.100.2
-sudo docker exec lighthouse1 ping -c1 192.168.100.3
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3
 
 set +x
 echo
 echo " *** Testing ping from host2"
 echo
 set -x
-sudo docker exec host2 ping -c1 192.168.100.1
+docker exec host2 ping -c1 192.168.100.1
 # Should fail because not allowed by host3 inbound firewall
-! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+
+set +x
+echo
+echo " *** Testing ncat from host2"
+echo
+set -x
+# Should fail because not allowed by host3 inbound firewall
+! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
 
 set +x
 echo
 echo " *** Testing ping from host3"
 echo
 set -x
-sudo docker exec host3 ping -c1 192.168.100.1
-sudo docker exec host3 ping -c1 192.168.100.2
+docker exec host3 ping -c1 192.168.100.1
+docker exec host3 ping -c1 192.168.100.2
+
+set +x
+echo
+echo " *** Testing ncat from host3"
+echo
+set -x
+docker exec host3 ncat -nzv -w5 192.168.100.2 2000
+docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
 
 set +x
 echo
 echo " *** Testing ping from host4"
 echo
 set -x
-sudo docker exec host4 ping -c1 192.168.100.1
+docker exec host4 ping -c1 192.168.100.1
 # Should fail because not allowed by host4 outbound firewall
-! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
-! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
+! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
+
+set +x
+echo
+echo " *** Testing ncat from host4"
+echo
+set -x
+# Should fail because not allowed by host4 outbound firewall
+! docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
+! docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+! docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
+! docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
 
 set +x
 echo
@@ -75,15 +120,15 @@ echo " *** Testing conntrack"
 echo
 set -x
 # host2 can ping host3 now that host3 pinged it first
-sudo docker exec host2 ping -c1 192.168.100.3
+docker exec host2 ping -c1 192.168.100.3
 # host4 can ping host2 once conntrack established
-sudo docker exec host2 ping -c1 192.168.100.4
-sudo docker exec host4 ping -c1 192.168.100.2
+docker exec host2 ping -c1 192.168.100.4
+docker exec host4 ping -c1 192.168.100.2
 
-sudo docker exec host4 sh -c 'kill 1'
-sudo docker exec host3 sh -c 'kill 1'
-sudo docker exec host2 sh -c 'kill 1'
-sudo docker exec lighthouse1 sh -c 'kill 1'
+docker exec host4 sh -c 'kill 1'
+docker exec host3 sh -c 'kill 1'
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
 sleep 1
 
 if [ "$(jobs -r)" ]
```

.github/workflows/test.yml (vendored, 122 changed lines)

```diff
@@ -18,25 +18,79 @@ jobs:
     runs-on: ubuntu-latest
     steps:
 
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-        id: go
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.18-
+          go-version-file: 'go.mod'
+          check-latest: true
 
       - name: Build
         run: make all
 
+      - name: Vet
+        run: make vet
+
+      - name: Test
+        run: make test
+
+      - name: End 2 end
+        run: make e2evv
+
+      - name: Build test mobile
+        run: make build-test-mobile
+
+      - uses: actions/upload-artifact@v3
+        with:
+          name: e2e packet flow
+          path: e2e/mermaid/
+          if-no-files-found: warn
+
+  test-linux-boringcrypto:
+    name: Build and test on linux with boringcrypto
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
+
+      - name: Build
+        run: make bin-boringcrypto
+
+      - name: Test
+        run: make test-boringcrypto
+
+      - name: End 2 end
+        run: make e2evv GOEXPERIMENT=boringcrypto CGO_ENABLED=1
+
+  test:
+    name: Build and test on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [windows-latest, macos-11]
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
+
+      - name: Build nebula
+        run: go build ./cmd/nebula
+
+      - name: Build nebula-cert
+        run: go build ./cmd/nebula-cert
+
+      - name: Vet
+        run: make vet
+
       - name: Test
         run: make test
 
@@ -48,45 +102,3 @@ jobs:
           name: e2e packet flow
           path: e2e/mermaid/
           if-no-files-found: warn
-
-  test:
-    name: Build and test on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [windows-latest, macos-11]
-    steps:
-
-      - name: Set up Go 1.18
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18
-        id: go
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.18-
-
-      - name: Build nebula
-        run: go build ./cmd/nebula
-
-      - name: Build nebula-cert
-        run: go build ./cmd/nebula-cert
-
-      - name: Test
-        run: go test -v ./...
-
-      - name: End 2 end
-        run: make e2evv
-
-      - uses: actions/upload-artifact@v3
-        with:
-          name: e2e packet flow
-          path: e2e/mermaid/
-          if-no-files-found: warn
```

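The new test-linux-boringcrypto job builds and tests Nebula under Go's boringcrypto experiment. A rough local equivalent, assuming the Makefile targets added in this commit range and a Linux amd64 or arm64 host with CGO available:

```sh
# Build the boringcrypto variant, run its unit tests, then the end-to-end tests.
make bin-boringcrypto
make test-boringcrypto
make e2evv GOEXPERIMENT=boringcrypto CGO_ENABLED=1
```
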
.gitignore (vendored, 8 changed lines)

```diff
@@ -4,10 +4,14 @@
 /nebula-arm6
 /nebula-darwin
 /nebula.exe
-/cert/*.crt
-/cert/*.key
+/nebula-cert.exe
 /coverage.out
 /cpu.pprof
 /build
 /*.tar.gz
 /e2e/mermaid/
+**.crt
+**.key
+**.pem
+!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key
+!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt
```

CHANGELOG.md (182 changed lines)

```diff
@@ -7,6 +7,179 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [1.8.2] - 2024-01-08
+
+### Fixed
+
+- Fix multiple routines when listen.port is zero. This was a regression
+  introduced in v1.6.0. (#1057)
+
+### Changed
+
+- Small dependency update for Noise. (#1038)
+
+## [1.8.1] - 2023-12-19
+
+### Security
+
+- Update `golang.org/x/crypto`, which includes a fix for CVE-2023-48795. (#1048)
+
+### Fixed
+
+- Fix a deadlock introduced in v1.8.0 that could occur during handshakes. (#1044)
+
+- Fix mobile builds. (#1035)
+
+## [1.8.0] - 2023-12-06
+
+### Deprecated
+
+- The next minor release of Nebula, 1.9.0, will require at least Windows 10 or
+  Windows Server 2016. This is because support for earlier versions was removed
+  in Go 1.21. See https://go.dev/doc/go1.21#windows
+
+### Added
+
+- Linux: Notify systemd of service readiness. This should resolve timing issues
+  with services that depend on Nebula being active. For an example of how to
+  enable this, see: `examples/service_scripts/nebula.service`. (#929)
+
+- Windows: Use Registered IO (RIO) when possible. Testing on a Windows 11
+  machine shows ~50x improvement in throughput. (#905)
+
+- NetBSD, OpenBSD: Added rudimentary support. (#916, #812)
+
+- FreeBSD: Add support for naming tun devices. (#903)
+
+### Changed
+
+- `pki.disconnect_invalid` will now default to true. This means that once a
+  certificate expires, the tunnel will be disconnected. If you use SIGHUP to
+  reload certificates without restarting Nebula, you should ensure all of your
+  clients are on 1.7.0 or newer before you enable this feature. (#859)
+
+- Limit how often a busy tunnel can requery the lighthouse. The new config
+  option `timers.requery_wait_duration` defaults to `60s`. (#940)
+
+- The internal structures for hostmaps were refactored to reduce memory usage
+  and the potential for subtle bugs. (#843, #938, #953, #954, #955)
+
+- Lots of dependency updates.
+
+### Fixed
+
+- Windows: Retry wintun device creation if it fails the first time. (#985)
+
+- Fix issues with firewall reject packets that could cause panics. (#957)
+
+- Fix relay migration during re-handshakes. (#964)
+
+- Various other refactors and fixes. (#935, #952, #972, #961, #996, #1002,
+  #987, #1004, #1030, #1032, ...)
+
+## [1.7.2] - 2023-06-01
+
+### Fixed
+
+- Fix a freeze during config reload if the `static_host_map` config was changed. (#886)
+
+## [1.7.1] - 2023-05-18
+
+### Fixed
+
+- Fix IPv4 addresses returned by `static_host_map` DNS lookup queries being
+  treated as IPv6 addresses. (#877)
+
+## [1.7.0] - 2023-05-17
+
+### Added
+
+- `nebula-cert ca` now supports encrypting the CA's private key with a
+  passphrase. Pass `-encrypt` in order to be prompted for a passphrase.
+  Encryption is performed using AES-256-GCM and Argon2id for KDF. KDF
+  parameters default to RFC recommendations, but can be overridden via CLI
+  flags `-argon-memory`, `-argon-parallelism`, and `-argon-iterations`. (#386)
+
+- Support for curve P256 and BoringCrypto has been added. See README section
+  "Curve P256 and BoringCrypto" for more details. (#865, #861, #769, #856, #803)
+
+- New firewall rule `local_cidr`. This could be used to filter destinations
+  when using `unsafe_routes`. (#507)
+
+- Add `unsafe_route` option `install`. This controls whether the route is
+  installed in the systems routing table. (#831)
+
+- Add `tun.use_system_route_table` option. Set to true to manage unsafe routes
+  directly on the system route table with gateway routes instead of in Nebula
+  configuration files. This is only supported on Linux. (#839)
+
+- The metric `certificate.ttl_seconds` is now exposed via stats. (#782)
+
+- Add `punchy.respond_delay` option. This allows you to change the delay
+  before attempting punchy.respond. Default is 5 seconds. (#721)
+
+- Added SSH commands to allow the capture of a mutex profile. (#737)
+
+- You can now set `lighthouse.calculated_remotes` to make it possible to do
+  handshakes without a lighthouse in certain configurations. (#759)
+
+- The firewall can be configured to send REJECT replies instead of the default
+  DROP behavior. (#738)
+
+- For macOS, an example launchd configuration file is now provided. (#762)
+
+### Changed
+
+- Lighthouses and other `static_host_map` entries that use DNS names will now
+  be automatically refreshed to detect when the IP address changes. (#796)
+
+- Lighthouses send ACK replies back to clients so that they do not fall into
+  connection testing as often by clients. (#851, #408)
+
+- Allow the `listen.host` option to contain a hostname. (#825)
+
+- When Nebula switches to a new certificate (such as via SIGHUP), we now
+  rehandshake with all existing tunnels. This allows firewall groups to be
+  updated and `pki.disconnect_invalid` to know about the new certificate
+  expiration time. (#838, #857, #842, #840, #835, #828, #820, #807)
+
+### Fixed
+
+- Always disconnect blocklisted hosts, even if `pki.disconnect_invalid` is
+  not set. (#858)
+
+- Dependencies updated and go1.20 required. (#780, #824, #855, #854)
+
+- Fix possible race condition with relays. (#827)
+
+- FreeBSD: Fix connection to the localhost's own Nebula IP. (#808)
+
+- Normalize and document some common log field values. (#837, #811)
+
+- Fix crash if you set unlucky values for the firewall timeout configuration
+  options. (#802)
+
+- Make DNS queries case insensitive. (#793)
+
+- Update example systemd configurations to want `nss-lookup`. (#791)
+
+- Errors with SSH commands now go to the SSH tunnel instead of stderr. (#757)
+
+- Fix a hang when shutting down Android. (#772)
+
+## [1.6.1] - 2022-09-26
+
+### Fixed
+
+- Refuse to process underlay packets received from overlay IPs. This prevents
+  confusion on hosts that have unsafe routes configured. (#741)
+
+- The ssh `reload` command did not work on Windows, since it relied on sending
+  a SIGHUP signal internally. This has been fixed. (#725)
+
+- A regression in v1.5.2 that broke unsafe routes on Mobile clients has been
+  fixed. (#729)
+
 ## [1.6.0] - 2022-06-30
 
 ### Added
@@ -385,7 +558,14 @@ created.)
 
 - Initial public release.
 
-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.6.0...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.8.2...HEAD
+[1.8.2]: https://github.com/slackhq/nebula/releases/tag/v1.8.2
+[1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
+[1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0
+[1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
+[1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
+[1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
+[1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
 [1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0
 [1.5.2]: https://github.com/slackhq/nebula/releases/tag/v1.5.2
 [1.5.0]: https://github.com/slackhq/nebula/releases/tag/v1.5.0
```

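The 1.7.0 notes above introduce both encrypted CA private keys (`-encrypt`) and curve P256 support (`-curve`, as used by the smoke-test scripts in this range). A minimal sketch of how those flags combine, assuming a built `nebula-cert` binary; the names and IP are placeholders:

```sh
# Create a P256 CA whose private key is protected by a passphrase (prompted interactively).
nebula-cert ca -name "Example Org" -curve P256 -encrypt

# Sign a host certificate against that CA.
nebula-cert sign -name "host1" -ip "192.168.100.10/24"
```
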
LOGGING.md (new file, 38 lines)

````markdown
### Logging conventions

A log message (the string/format passed to `Info`, `Error`, `Debug` etc, as well as their `Sprintf` counterparts) should
be a descriptive message about the event and may contain specific identifying characteristics. Regardless of the
level of detail in the message, identifying characteristics should always be included via `WithField`, `WithFields`, or
`WithError`.

If an error is being logged, use `l.WithError(err)` so that there is better discoverability about the event as well
as the specific error condition.

#### Common fields

- `cert` - a `cert.NebulaCertificate` object; do not `.String()` this manually, `logrus` will marshal objects properly
  for the formatter it is using.
- `fingerprint` - a single `NebulaCertificate` hex encoded fingerprint
- `fingerprints` - an array of `NebulaCertificate` hex encoded fingerprints
- `fwPacket` - a FirewallPacket object
- `handshake` - an object containing:
  - `stage` - the current stage counter
  - `style` - noise handshake style `ix_psk0`, `xx`, etc
- `header` - a nebula header object
- `udpAddr` - a `net.UDPAddr` object
- `udpIp` - a udp ip address
- `vpnIp` - vpn ip of the host (remote or local)
- `relay` - the vpnIp of the relay host that is or should be handling the relay packet
- `relayFrom` - the vpnIp of the initial sender of the relayed packet
- `relayTo` - the vpnIp of the final destination of a relayed packet

#### Example:

```
l.WithError(err).
    WithField("vpnIp", IntIp(hostinfo.hostId)).
    WithField("udpAddr", addr).
    WithField("handshake", m{"stage": 1, "style": "ix"}).
    WithField("cert", remoteCert).
    Info("Invalid certificate from host")
```
````

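The example in LOGGING.md assumes surrounding Nebula types (`IntIp`, `m`, `hostinfo`). A self-contained sketch of the same convention using plain logrus, with placeholder values standing in for those types:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	l := logrus.New()
	err := errors.New("certificate expired")

	// Identifying characteristics go in fields; the message itself stays descriptive and stable.
	l.WithError(err).
		WithField("vpnIp", "192.168.100.2").
		WithField("udpAddr", "203.0.113.9:4242").
		WithField("handshake", map[string]interface{}{"stage": 1, "style": "ix_psk0"}).
		Info("Invalid certificate from host")
}
```
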
Makefile (54 changed lines)

```diff
@@ -1,4 +1,4 @@
-GOMINVERSION = 1.18
+GOMINVERSION = 1.20
 NEBULA_CMD_PATH = "./cmd/nebula"
 GO111MODULE = on
 export GO111MODULE
@@ -12,6 +12,8 @@ ifeq ($(OS),Windows_NT)
 	GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
 	NEBULA_CMD_SUFFIX = .exe
 	NULL_FILE = nul
+	# RIO on windows does pointer stuff that makes go vet angry
+	VET_FLAGS = -unsafeptr=false
 else
 	GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
 	GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
@@ -44,10 +46,21 @@ ALL_LINUX = linux-amd64 \
 	linux-mips-softfloat \
 	linux-riscv64
 
+ALL_FREEBSD = freebsd-amd64 \
+	freebsd-arm64
+
+ALL_OPENBSD = openbsd-amd64 \
+	openbsd-arm64
+
+ALL_NETBSD = netbsd-amd64 \
+	netbsd-arm64
+
 ALL = $(ALL_LINUX) \
+	$(ALL_FREEBSD) \
+	$(ALL_OPENBSD) \
+	$(ALL_NETBSD) \
 	darwin-amd64 \
 	darwin-arm64 \
-	freebsd-amd64 \
 	windows-amd64 \
 	windows-arm64
 
@@ -66,13 +79,22 @@ e2evvv: e2ev
 e2evvvv: TEST_ENV += TEST_LOGS=3
 e2evvvv: e2ev
 
+e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
+e2e-bench: e2e
+
 all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
 
 release: $(ALL:%=build/nebula-%.tar.gz)
 
 release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
 
-release-freebsd: build/nebula-freebsd-amd64.tar.gz
+release-freebsd: $(ALL_FREEBSD:%=build/nebula-%.tar.gz)
+
+release-openbsd: $(ALL_OPENBSD:%=build/nebula-%.tar.gz)
+
+release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)
+
+release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
 
 BUILD_ARGS = -trimpath
 
@@ -88,6 +110,12 @@ bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
 bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
 	mv $? .
 
+bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
+	mv $? .
+
+bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
+	mv $? .
+
 bin:
 	go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
 	go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
@@ -102,6 +130,10 @@ build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
 # Build an extra small binary for mips-softfloat
 build/linux-mips-softfloat/%: LDFLAGS += -s -w
 
+# boringcrypto
+build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
+build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
+
 build/%/nebula: .FORCE
 	GOOS=$(firstword $(subst -, , $*)) \
 		GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
@@ -125,15 +157,24 @@ build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
 	cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe
 
```
||||||
vet:
|
vet:
|
||||||
go vet -v ./...
|
go vet $(VET_FLAGS) -v ./...
|
||||||
|
|
||||||
test:
|
test:
|
||||||
go test -v ./...
|
go test -v ./...
|
||||||
|
|
||||||
|
test-boringcrypto:
|
||||||
|
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./...
|
||||||
|
|
||||||
test-cov-html:
|
test-cov-html:
|
||||||
go test -coverprofile=coverage.out
|
go test -coverprofile=coverage.out
|
||||||
go tool cover -html=coverage.out
|
go tool cover -html=coverage.out
|
||||||
|
|
||||||
|
build-test-mobile:
|
||||||
|
GOARCH=amd64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||||
|
GOARCH=arm64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||||
|
GOARCH=amd64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||||
|
GOARCH=arm64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||||
|
|
||||||
bench:
|
bench:
|
||||||
go test -bench=.
|
go test -bench=.
|
||||||
|
|
||||||
@@ -167,14 +208,17 @@ bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
|
|||||||
smoke-docker: bin-docker
|
smoke-docker: bin-docker
|
||||||
cd .github/workflows/smoke/ && ./build.sh
|
cd .github/workflows/smoke/ && ./build.sh
|
||||||
cd .github/workflows/smoke/ && ./smoke.sh
|
cd .github/workflows/smoke/ && ./smoke.sh
|
||||||
|
cd .github/workflows/smoke/ && NAME="smoke-p256" CURVE="P256" ./build.sh
|
||||||
|
cd .github/workflows/smoke/ && NAME="smoke-p256" ./smoke.sh
|
||||||
|
|
||||||
smoke-relay-docker: bin-docker
|
smoke-relay-docker: bin-docker
|
||||||
cd .github/workflows/smoke/ && ./build-relay.sh
|
cd .github/workflows/smoke/ && ./build-relay.sh
|
||||||
cd .github/workflows/smoke/ && ./smoke-relay.sh
|
cd .github/workflows/smoke/ && ./smoke-relay.sh
|
||||||
|
|
||||||
smoke-docker-race: BUILD_ARGS = -race
|
smoke-docker-race: BUILD_ARGS = -race
|
||||||
|
smoke-docker-race: CGO_ENABLED = 1
|
||||||
smoke-docker-race: smoke-docker
|
smoke-docker-race: smoke-docker
|
||||||
|
|
||||||
.FORCE:
|
.FORCE:
|
||||||
.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race
|
.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html
|
||||||
.DEFAULT_GOAL := bin
|
.DEFAULT_GOAL := bin
|
||||||
|
|||||||
40
README.md
40
README.md
@@ -8,7 +8,7 @@ and tunneling, and each of those individual pieces existed before Nebula in vari
|
|||||||
What makes Nebula different to existing offerings is that it brings all of these ideas together,
|
What makes Nebula different to existing offerings is that it brings all of these ideas together,
|
||||||
resulting in a sum that is greater than its individual parts.
|
resulting in a sum that is greater than its individual parts.
|
||||||
|
|
||||||
Further documentation can be found [here](https://www.defined.net/nebula/).
|
Further documentation can be found [here](https://nebula.defined.net/docs/).
|
||||||
|
|
||||||
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
||||||
|
|
||||||
@@ -27,16 +27,31 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
|
|||||||
|
|
||||||
#### Distribution Packages
|
#### Distribution Packages
|
||||||
|
|
||||||
- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
|
- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
|
||||||
```
|
```
|
||||||
$ sudo pacman -S nebula
|
$ sudo pacman -S nebula
|
||||||
```
|
```
|
||||||
- [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/)
|
|
||||||
|
- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
|
||||||
```
|
```
|
||||||
$ sudo dnf copr enable jdoss/nebula
|
|
||||||
$ sudo dnf install nebula
|
$ sudo dnf install nebula
|
||||||
```
|
```
|
||||||
|
|
||||||
|
- [Debian Linux](https://packages.debian.org/source/stable/nebula)
|
||||||
|
```
|
||||||
|
$ sudo apt install nebula
|
||||||
|
```
|
||||||
|
|
||||||
|
- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
|
||||||
|
```
|
||||||
|
$ sudo apk add nebula
|
||||||
|
```
|
||||||
|
|
||||||
|
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
|
||||||
|
```
|
||||||
|
$ brew install nebula
|
||||||
|
```
|
||||||
|
|
||||||
#### Mobile
|
#### Mobile
|
||||||
|
|
||||||
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
|
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
|
||||||
@@ -93,18 +108,18 @@ Download a copy of the nebula [example configuration](https://github.com/slackhq
|
|||||||
|
|
||||||
#### 6. Copy nebula credentials, configuration, and binaries to each host
|
#### 6. Copy nebula credentials, configuration, and binaries to each host
|
||||||
|
|
||||||
For each host, copy the nebula binary to the host, along with `config.yaml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.
|
For each host, copy the nebula binary to the host, along with `config.yml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.
|
||||||
|
|
||||||
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
|
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
|
||||||
|
|
||||||
#### 7. Run nebula on each host
|
#### 7. Run nebula on each host
|
||||||
```
|
```
|
||||||
./nebula -config /path/to/config.yaml
|
./nebula -config /path/to/config.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
## Building Nebula from source
|
## Building Nebula from source
|
||||||
|
|
||||||
Download go and clone this repo. Change to the nebula directory.
|
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
|
||||||
|
|
||||||
To build nebula for all platforms:
|
To build nebula for all platforms:
|
||||||
`make all`
|
`make all`
|
||||||
@@ -114,6 +129,17 @@ To build nebula for a specific platform (ex, Windows):
|
|||||||
|
|
||||||
See the [Makefile](Makefile) for more details on build targets
|
See the [Makefile](Makefile) for more details on build targets
|
||||||
|
|
||||||
|
## Curve P256 and BoringCrypto
|
||||||
|
|
||||||
|
The default curve used for cryptographic handshakes and signatures is Curve25519. This is the recommended setting for most users. If your deployment has certain compliance requirements, you have the option of creating your CA using `nebula-cert ca -curve P256` to use NIST Curve P256. The CA will then sign certificates using ECDSA P256, and any hosts using these certificates will use P256 for ECDH handshakes.
|
||||||
|
|
||||||
|
In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:
|
||||||
|
|
||||||
|
make bin-boringcrypto
|
||||||
|
make release-boringcrypto
|
||||||
|
|
||||||
|
This is not the recommended default deployment, but may be useful based on your compliance requirements.
|
||||||
|
|
||||||
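A quick way to confirm that a build actually picked up the experiment is to check `crypto/boring` at runtime, in the same spirit as the `boring.go` file added in this changeset. A minimal sketch (the `main` wrapper is illustrative only and, like `boring.go`, it only compiles under `GOEXPERIMENT=boringcrypto`):

```go
//go:build boringcrypto

package main

import (
	"crypto/boring"
	"fmt"
)

func main() {
	// Reports whether the BoringCrypto module is actually linked and in use.
	fmt.Println("boringcrypto enabled:", boring.Enabled())
}
```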
## Credits
|
## Credits
|
||||||
|
|
||||||
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.
|
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.
|
||||||
|
|||||||
12 SECURITY.md Normal file
@@ -0,0 +1,12 @@
Security Policy
===============

Reporting a Vulnerability
-------------------------

If you believe you have found a security vulnerability with Nebula, please let
us know right away. We will investigate all reports and do our best to quickly
fix valid issues.

You can submit your report on [HackerOne](https://hackerone.com/slack) and our
security team will respond as soon as possible.
@@ -12,7 +12,7 @@ import (
|
|||||||
|
|
||||||
type AllowList struct {
|
type AllowList struct {
|
||||||
// The values of this cidrTree are `bool`, signifying allow/deny
|
// The values of this cidrTree are `bool`, signifying allow/deny
|
||||||
cidrTree *cidr.Tree6
|
cidrTree *cidr.Tree6[bool]
|
||||||
}
|
}
|
||||||
|
|
||||||
type RemoteAllowList struct {
|
type RemoteAllowList struct {
|
||||||
@@ -20,7 +20,7 @@ type RemoteAllowList struct {
|
|||||||
|
|
||||||
// Inside Range Specific, keys of this tree are inside CIDRs and values
|
// Inside Range Specific, keys of this tree are inside CIDRs and values
|
||||||
// are *AllowList
|
// are *AllowList
|
||||||
insideAllowLists *cidr.Tree6
|
insideAllowLists *cidr.Tree6[*AllowList]
|
||||||
}
|
}
|
||||||
|
|
||||||
type LocalAllowList struct {
|
type LocalAllowList struct {
|
||||||
@@ -88,7 +88,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
|
|||||||
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
|
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
|
||||||
}
|
}
|
||||||
|
|
||||||
tree := cidr.NewTree6()
|
tree := cidr.NewTree6[bool]()
|
||||||
|
|
||||||
// Keep track of the rules we have added for both ipv4 and ipv6
|
// Keep track of the rules we have added for both ipv4 and ipv6
|
||||||
type allowListRules struct {
|
type allowListRules struct {
|
||||||
@@ -218,13 +218,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
|
|||||||
return nameRules, nil
|
return nameRules, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
|
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6[*AllowList], error) {
|
||||||
value := c.Get(k)
|
value := c.Get(k)
|
||||||
if value == nil {
|
if value == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteAllowRanges := cidr.NewTree6()
|
remoteAllowRanges := cidr.NewTree6[*AllowList]()
|
||||||
|
|
||||||
rawMap, ok := value.(map[interface{}]interface{})
|
rawMap, ok := value.(map[interface{}]interface{})
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -257,13 +257,8 @@ func (al *AllowList) Allow(ip net.IP) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
result := al.cidrTree.MostSpecificContains(ip)
|
_, result := al.cidrTree.MostSpecificContains(ip)
|
||||||
switch v := result.(type) {
|
return result
|
||||||
case bool:
|
|
||||||
return v
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
|
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
|
||||||
@@ -271,13 +266,8 @@ func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
result := al.cidrTree.MostSpecificContainsIpV4(ip)
|
_, result := al.cidrTree.MostSpecificContainsIpV4(ip)
|
||||||
switch v := result.(type) {
|
return result
|
||||||
case bool:
|
|
||||||
return v
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
|
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
|
||||||
@@ -285,13 +275,8 @@ func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
|
_, result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
|
||||||
switch v := result.(type) {
|
return result
|
||||||
case bool:
|
|
||||||
return v
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (al *LocalAllowList) Allow(ip net.IP) bool {
|
func (al *LocalAllowList) Allow(ip net.IP) bool {
|
||||||
@@ -352,9 +337,9 @@ func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
|
|||||||
|
|
||||||
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
|
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
|
||||||
if al.insideAllowLists != nil {
|
if al.insideAllowLists != nil {
|
||||||
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
|
ok, inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
|
||||||
if inside != nil {
|
if ok {
|
||||||
return inside.(*AllowList)
|
return inside
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
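For reference, a minimal sketch of how the now-generic `cidr.Tree6` is used after this change (signatures are assumed from the hunks above; the sample addresses and the `main` wrapper are illustrative only):

```go
package main

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/cidr"
)

func main() {
	// Values are typed now, so no interface{} assertion (or panic path) is needed.
	tree := cidr.NewTree6[bool]()
	tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
	tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)

	// MostSpecificContains now returns (found, value) instead of an interface{}.
	ok, allowed := tree.MostSpecificContains(net.ParseIP("10.1.2.3"))
	fmt.Println(ok, allowed) // true false: the more specific /8 deny rule wins
}
```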
@@ -100,7 +100,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
|
|||||||
func TestAllowList_Allow(t *testing.T) {
|
func TestAllowList_Allow(t *testing.T) {
|
||||||
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
|
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
|
||||||
|
|
||||||
tree := cidr.NewTree6()
|
tree := cidr.NewTree6[bool]()
|
||||||
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
|
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
|
||||||
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
|
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
|
||||||
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
|
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
|
||||||
|
|||||||
8 boring.go Normal file
@@ -0,0 +1,8 @@
//go:build boringcrypto
// +build boringcrypto

package nebula

import "crypto/boring"

var boringEnabled = boring.Enabled
143
calculated_remote.go
Normal file
143
calculated_remote.go
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This allows us to "guess" what the remote might be for a host while we wait
|
||||||
|
// for the lighthouse response. See "lighthouse.calculated_remotes" in the
|
||||||
|
// example config file.
|
||||||
|
type calculatedRemote struct {
|
||||||
|
ipNet net.IPNet
|
||||||
|
maskIP iputil.VpnIp
|
||||||
|
mask iputil.VpnIp
|
||||||
|
port uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) {
|
||||||
|
// Ensure this is an IPv4 mask that we expect
|
||||||
|
ones, bits := ipNet.Mask.Size()
|
||||||
|
if ones == 0 || bits != 32 {
|
||||||
|
return nil, fmt.Errorf("invalid mask: %v", ipNet)
|
||||||
|
}
|
||||||
|
if port < 0 || port > math.MaxUint16 {
|
||||||
|
return nil, fmt.Errorf("invalid port: %d", port)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &calculatedRemote{
|
||||||
|
ipNet: *ipNet,
|
||||||
|
maskIP: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
|
mask: iputil.Ip2VpnIp(ipNet.Mask),
|
||||||
|
port: uint32(port),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *calculatedRemote) String() string {
|
||||||
|
return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
|
||||||
|
// Combine the masked bytes of the "mask" IP with the unmasked bytes
|
||||||
|
// of the overlay IP
|
||||||
|
masked := (c.maskIP & c.mask) | (ip & ^c.mask)
|
||||||
|
|
||||||
|
return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
|
||||||
|
}
|
||||||
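The remote "guess" in `Apply` is plain bit masking; here is a self-contained sketch of the same arithmetic using the values from the test file further down (plain `uint32`s, no nebula types):

```go
package main

import "fmt"

func main() {
	ip := uint32(10)<<24 | 0<<16 | 10<<8 | 182     // overlay address 10.0.10.182
	maskIP := uint32(192)<<24 | 168<<16 | 1<<8 | 0 // calculated remote mask 192.168.1.0
	mask := uint32(0xFFFFFF00)                     // /24

	// Same expression as Apply: masked bytes from the mask IP, unmasked bytes from the overlay IP.
	masked := (maskIP & mask) | (ip & ^mask)

	fmt.Printf("%d.%d.%d.%d\n", masked>>24, masked>>16&0xFF, masked>>8&0xFF, masked&0xFF) // 192.168.1.182
}
```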
|
|
||||||
|
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4[[]*calculatedRemote], error) {
|
||||||
|
value := c.Get(k)
|
||||||
|
if value == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
calculatedRemotes := cidr.NewTree4[[]*calculatedRemote]()
|
||||||
|
|
||||||
|
rawMap, ok := value.(map[any]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
|
||||||
|
}
|
||||||
|
for rawKey, rawValue := range rawMap {
|
||||||
|
rawCIDR, ok := rawKey.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry, err := newCalculatedRemotesListFromConfig(rawValue)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
calculatedRemotes.AddCIDR(ipNet, entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
return calculatedRemotes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalculatedRemotesListFromConfig(raw any) ([]*calculatedRemote, error) {
|
||||||
|
rawList, ok := raw.([]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("calculated_remotes entry has invalid type: %T", raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
var l []*calculatedRemote
|
||||||
|
for _, e := range rawList {
|
||||||
|
c, err := newCalculatedRemotesEntryFromConfig(e)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("calculated_remotes entry: %w", err)
|
||||||
|
}
|
||||||
|
l = append(l, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
|
||||||
|
rawMap, ok := raw.(map[any]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid type: %T", raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawValue := rawMap["mask"]
|
||||||
|
if rawValue == nil {
|
||||||
|
return nil, fmt.Errorf("missing mask: %v", rawMap)
|
||||||
|
}
|
||||||
|
rawMask, ok := rawValue.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
|
||||||
|
}
|
||||||
|
_, ipNet, err := net.ParseCIDR(rawMask)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid mask: %s", rawMask)
|
||||||
|
}
|
||||||
|
|
||||||
|
var port int
|
||||||
|
rawValue = rawMap["port"]
|
||||||
|
if rawValue == nil {
|
||||||
|
return nil, fmt.Errorf("missing port: %v", rawMap)
|
||||||
|
}
|
||||||
|
switch v := rawValue.(type) {
|
||||||
|
case int:
|
||||||
|
port = v
|
||||||
|
case string:
|
||||||
|
port, err = strconv.Atoi(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid port: %s: %w", v, err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
return newCalculatedRemote(ipNet, port)
|
||||||
|
}
|
||||||
27 calculated_remote_test.go Normal file
@@ -0,0 +1,27 @@
package nebula

import (
	"net"
	"testing"

	"github.com/slackhq/nebula/iputil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCalculatedRemoteApply(t *testing.T) {
	_, ipNet, err := net.ParseCIDR("192.168.1.0/24")
	require.NoError(t, err)

	c, err := newCalculatedRemote(ipNet, 4242)
	require.NoError(t, err)

	input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182})

	expected := &Ip4AndPort{
		Ip:   uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})),
		Port: 4242,
	}

	assert.Equal(t, expected, c.Apply(input))
}
163
cert.go
163
cert.go
@@ -1,163 +0,0 @@
|
|||||||
package nebula
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"github.com/slackhq/nebula/config"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CertState struct {
|
|
||||||
certificate *cert.NebulaCertificate
|
|
||||||
rawCertificate []byte
|
|
||||||
rawCertificateNoKey []byte
|
|
||||||
publicKey []byte
|
|
||||||
privateKey []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
|
|
||||||
// Marshal the certificate to ensure it is valid
|
|
||||||
rawCertificate, err := certificate.Marshal()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
publicKey := certificate.Details.PublicKey
|
|
||||||
cs := &CertState{
|
|
||||||
rawCertificate: rawCertificate,
|
|
||||||
certificate: certificate, // PublicKey has been set to nil above
|
|
||||||
privateKey: privateKey,
|
|
||||||
publicKey: publicKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
cs.certificate.Details.PublicKey = nil
|
|
||||||
rawCertNoKey, err := cs.certificate.Marshal()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
|
|
||||||
}
|
|
||||||
cs.rawCertificateNoKey = rawCertNoKey
|
|
||||||
// put public key back
|
|
||||||
cs.certificate.Details.PublicKey = cs.publicKey
|
|
||||||
return cs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewCertStateFromConfig(c *config.C) (*CertState, error) {
|
|
||||||
var pemPrivateKey []byte
|
|
||||||
var err error
|
|
||||||
|
|
||||||
privPathOrPEM := c.GetString("pki.key", "")
|
|
||||||
|
|
||||||
if privPathOrPEM == "" {
|
|
||||||
return nil, errors.New("no pki.key path or PEM data provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(privPathOrPEM, "-----BEGIN") {
|
|
||||||
pemPrivateKey = []byte(privPathOrPEM)
|
|
||||||
privPathOrPEM = "<inline>"
|
|
||||||
} else {
|
|
||||||
pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rawKey, _, err := cert.UnmarshalX25519PrivateKey(pemPrivateKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var rawCert []byte
|
|
||||||
|
|
||||||
pubPathOrPEM := c.GetString("pki.cert", "")
|
|
||||||
|
|
||||||
if pubPathOrPEM == "" {
|
|
||||||
return nil, errors.New("no pki.cert path or PEM data provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(pubPathOrPEM, "-----BEGIN") {
|
|
||||||
rawCert = []byte(pubPathOrPEM)
|
|
||||||
pubPathOrPEM = "<inline>"
|
|
||||||
} else {
|
|
||||||
rawCert, err = ioutil.ReadFile(pubPathOrPEM)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if nebulaCert.Expired(time.Now()) {
|
|
||||||
return nil, fmt.Errorf("nebula certificate for this host is expired")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(nebulaCert.Details.Ips) == 0 {
|
|
||||||
return nil, fmt.Errorf("no IPs encoded in certificate")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = nebulaCert.VerifyPrivateKey(rawKey); err != nil {
|
|
||||||
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewCertState(nebulaCert, rawKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
|
|
||||||
var rawCA []byte
|
|
||||||
var err error
|
|
||||||
|
|
||||||
caPathOrPEM := c.GetString("pki.ca", "")
|
|
||||||
if caPathOrPEM == "" {
|
|
||||||
return nil, errors.New("no pki.ca path or PEM data provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(caPathOrPEM, "-----BEGIN") {
|
|
||||||
rawCA = []byte(caPathOrPEM)
|
|
||||||
|
|
||||||
} else {
|
|
||||||
rawCA, err = ioutil.ReadFile(caPathOrPEM)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
CAs, err := cert.NewCAPoolFromBytes(rawCA)
|
|
||||||
if errors.Is(err, cert.ErrExpired) {
|
|
||||||
var expired int
|
|
||||||
for _, cert := range CAs.CAs {
|
|
||||||
if cert.Expired(time.Now()) {
|
|
||||||
expired++
|
|
||||||
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if expired >= len(CAs.CAs) {
|
|
||||||
return nil, errors.New("no valid CA certificates present")
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
|
|
||||||
l.WithField("fingerprint", fp).Info("Blocklisting cert")
|
|
||||||
CAs.BlocklistFingerprint(fp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Support deprecated config for at least one minor release to allow for migrations
|
|
||||||
//TODO: remove in 2022 or later
|
|
||||||
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
|
|
||||||
l.WithField("fingerprint", fp).Info("Blocklisting cert")
|
|
||||||
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
|
|
||||||
CAs.BlocklistFingerprint(fp)
|
|
||||||
}
|
|
||||||
|
|
||||||
return CAs, nil
|
|
||||||
}
|
|
||||||
10
cert/ca.go
10
cert/ca.go
@@ -91,9 +91,15 @@ func (ncp *NebulaCAPool) ResetCertBlocklist() {
|
|||||||
ncp.certBlocklist = make(map[string]struct{})
|
ncp.certBlocklist = make(map[string]struct{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
|
// NOTE: This uses an internal cache for Sha256Sum() that will not be invalidated
|
||||||
|
// automatically if you manually change any fields in the NebulaCertificate.
|
||||||
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
|
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
|
||||||
h, err := c.Sha256Sum()
|
return ncp.isBlocklistedWithCache(c, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
|
||||||
|
func (ncp *NebulaCAPool) isBlocklistedWithCache(c *NebulaCertificate, useCache bool) bool {
|
||||||
|
h, err := c.sha256SumWithCache(useCache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
480
cert/cert.go
480
cert/cert.go
@@ -2,35 +2,55 @@ package cert
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto"
|
"crypto/ecdh"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/ed25519"
|
||||||
|
"crypto/elliptic"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
"net"
|
"net"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/crypto/curve25519"
|
"golang.org/x/crypto/curve25519"
|
||||||
"golang.org/x/crypto/ed25519"
|
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
)
|
)
|
||||||
|
|
||||||
const publicKeyLen = 32
|
const publicKeyLen = 32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
CertBanner = "NEBULA CERTIFICATE"
|
CertBanner = "NEBULA CERTIFICATE"
|
||||||
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
|
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
|
||||||
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
|
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
|
||||||
Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
|
EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
|
||||||
Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
|
Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
|
||||||
|
Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
|
||||||
|
|
||||||
|
P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
|
||||||
|
P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
|
||||||
|
EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
|
||||||
|
ECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 PRIVATE KEY"
|
||||||
)
|
)
|
||||||
|
|
||||||
type NebulaCertificate struct {
|
type NebulaCertificate struct {
|
||||||
Details NebulaCertificateDetails
|
Details NebulaCertificateDetails
|
||||||
Signature []byte
|
Signature []byte
|
||||||
|
|
||||||
|
// the cached hex string of the calculated sha256sum
|
||||||
|
// for VerifyWithCache
|
||||||
|
sha256sum atomic.Pointer[string]
|
||||||
|
|
||||||
|
// the cached public key bytes if they were verified as the signer
|
||||||
|
// for VerifyWithCache
|
||||||
|
signatureVerified atomic.Pointer[[]byte]
|
||||||
}
|
}
|
||||||
|
|
||||||
type NebulaCertificateDetails struct {
|
type NebulaCertificateDetails struct {
|
||||||
@@ -46,10 +66,25 @@ type NebulaCertificateDetails struct {
|
|||||||
|
|
||||||
// Map of groups for faster lookup
|
// Map of groups for faster lookup
|
||||||
InvertedGroups map[string]struct{}
|
InvertedGroups map[string]struct{}
|
||||||
|
|
||||||
|
Curve Curve
|
||||||
|
}
|
||||||
|
|
||||||
|
type NebulaEncryptedData struct {
|
||||||
|
EncryptionMetadata NebulaEncryptionMetadata
|
||||||
|
Ciphertext []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type NebulaEncryptionMetadata struct {
|
||||||
|
EncryptionAlgorithm string
|
||||||
|
Argon2Parameters Argon2Parameters
|
||||||
}
|
}
|
||||||
|
|
||||||
type m map[string]interface{}
|
type m map[string]interface{}
|
||||||
|
|
||||||
|
// Returned if we try to unmarshal an encrypted private key without a passphrase
|
||||||
|
var ErrPrivateKeyEncrypted = errors.New("private key must be decrypted")
|
||||||
|
|
||||||
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert
|
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert
|
||||||
func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
||||||
if len(b) == 0 {
|
if len(b) == 0 {
|
||||||
@@ -84,6 +119,7 @@ func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
|||||||
PublicKey: make([]byte, len(rc.Details.PublicKey)),
|
PublicKey: make([]byte, len(rc.Details.PublicKey)),
|
||||||
IsCA: rc.Details.IsCA,
|
IsCA: rc.Details.IsCA,
|
||||||
InvertedGroups: make(map[string]struct{}),
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
Curve: rc.Details.Curve,
|
||||||
},
|
},
|
||||||
Signature: make([]byte, len(rc.Signature)),
|
Signature: make([]byte, len(rc.Signature)),
|
||||||
}
|
}
|
||||||
@@ -134,6 +170,28 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
|
|||||||
return nc, r, err
|
return nc, r, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func MarshalPrivateKey(curve Curve, b []byte) []byte {
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: P256PrivateKeyBanner, Bytes: b})
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func MarshalSigningPrivateKey(curve Curve, b []byte) []byte {
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: b})
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: ECDSAP256PrivateKeyBanner, Bytes: b})
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key
|
// MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key
|
||||||
func MarshalX25519PrivateKey(b []byte) []byte {
|
func MarshalX25519PrivateKey(b []byte) []byte {
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
||||||
@@ -144,6 +202,90 @@ func MarshalEd25519PrivateKey(key ed25519.PrivateKey) []byte {
|
|||||||
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key})
|
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func UnmarshalPrivateKey(b []byte) ([]byte, []byte, Curve, error) {
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
var expectedLen int
|
||||||
|
var curve Curve
|
||||||
|
switch k.Type {
|
||||||
|
case X25519PrivateKeyBanner:
|
||||||
|
expectedLen = 32
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
case P256PrivateKeyBanner:
|
||||||
|
expectedLen = 32
|
||||||
|
curve = Curve_P256
|
||||||
|
default:
|
||||||
|
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula private key banner")
|
||||||
|
}
|
||||||
|
if len(k.Bytes) != expectedLen {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s private key", expectedLen, curve)
|
||||||
|
}
|
||||||
|
return k.Bytes, r, curve, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func UnmarshalSigningPrivateKey(b []byte) ([]byte, []byte, Curve, error) {
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
var curve Curve
|
||||||
|
switch k.Type {
|
||||||
|
case EncryptedEd25519PrivateKeyBanner:
|
||||||
|
return nil, nil, Curve_CURVE25519, ErrPrivateKeyEncrypted
|
||||||
|
case EncryptedECDSAP256PrivateKeyBanner:
|
||||||
|
return nil, nil, Curve_P256, ErrPrivateKeyEncrypted
|
||||||
|
case Ed25519PrivateKeyBanner:
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
if len(k.Bytes) != ed25519.PrivateKeySize {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid Ed25519 private key", ed25519.PrivateKeySize)
|
||||||
|
}
|
||||||
|
case ECDSAP256PrivateKeyBanner:
|
||||||
|
curve = Curve_P256
|
||||||
|
if len(k.Bytes) != 32 {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula Ed25519/ECDSA private key banner")
|
||||||
|
}
|
||||||
|
return k.Bytes, r, curve, nil
|
||||||
|
}
|
||||||
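A minimal round-trip sketch of the new curve-aware PEM helpers above (the zeroed key bytes are a placeholder, fine for the encode/decode path but not a usable key):

```go
package main

import (
	"fmt"

	"github.com/slackhq/nebula/cert"
)

func main() {
	// Placeholder 32-byte value standing in for an X25519 private key.
	priv := make([]byte, 32)

	// PEM-encode with the curve-specific banner...
	pemBytes := cert.MarshalPrivateKey(cert.Curve_CURVE25519, priv)

	// ...and decode it again, recovering the curve from the banner.
	raw, rest, curve, err := cert.UnmarshalPrivateKey(pemBytes)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw), len(rest), curve) // 32-byte key, no trailing bytes, Curve_CURVE25519
}
```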
|
|
||||||
|
// EncryptAndMarshalSigningPrivateKey is a simple helper to encrypt and PEM encode a private key
|
||||||
|
func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte, kdfParams *Argon2Parameters) ([]byte, error) {
|
||||||
|
ciphertext, err := aes256Encrypt(passphrase, kdfParams, b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err = proto.Marshal(&RawNebulaEncryptedData{
|
||||||
|
EncryptionMetadata: &RawNebulaEncryptionMetadata{
|
||||||
|
EncryptionAlgorithm: "AES-256-GCM",
|
||||||
|
Argon2Parameters: &RawNebulaArgon2Parameters{
|
||||||
|
Version: kdfParams.version,
|
||||||
|
Memory: kdfParams.Memory,
|
||||||
|
Parallelism: uint32(kdfParams.Parallelism),
|
||||||
|
Iterations: kdfParams.Iterations,
|
||||||
|
Salt: kdfParams.salt,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Ciphertext: ciphertext,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: EncryptedEd25519PrivateKeyBanner, Bytes: b}), nil
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: EncryptedECDSAP256PrivateKeyBanner, Bytes: b}), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("invalid curve: %v", curve)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b
|
// UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b
|
||||||
// or an error on failure
|
// or an error on failure
|
||||||
func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) {
|
func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) {
|
||||||
@@ -168,9 +310,13 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
|
|||||||
if k == nil {
|
if k == nil {
|
||||||
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
}
|
}
|
||||||
if k.Type != Ed25519PrivateKeyBanner {
|
|
||||||
|
if k.Type == EncryptedEd25519PrivateKeyBanner {
|
||||||
|
return nil, r, ErrPrivateKeyEncrypted
|
||||||
|
} else if k.Type != Ed25519PrivateKeyBanner {
|
||||||
return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner")
|
return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(k.Bytes) != ed25519.PrivateKeySize {
|
if len(k.Bytes) != ed25519.PrivateKeySize {
|
||||||
return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
||||||
}
|
}
|
||||||
@@ -178,6 +324,126 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
|
|||||||
return k.Bytes, r, nil
|
return k.Bytes, r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of nebula encrypted data into its
|
||||||
|
// protobuf-generated struct.
|
||||||
|
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
|
||||||
|
if len(b) == 0 {
|
||||||
|
return nil, fmt.Errorf("nil byte array")
|
||||||
|
}
|
||||||
|
var rned RawNebulaEncryptedData
|
||||||
|
err := proto.Unmarshal(b, &rned)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if rned.EncryptionMetadata == nil {
|
||||||
|
return nil, fmt.Errorf("encoded EncryptionMetadata was nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if rned.EncryptionMetadata.Argon2Parameters == nil {
|
||||||
|
return nil, fmt.Errorf("encoded Argon2Parameters was nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
params, err := unmarshalArgon2Parameters(rned.EncryptionMetadata.Argon2Parameters)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ned := NebulaEncryptedData{
|
||||||
|
EncryptionMetadata: NebulaEncryptionMetadata{
|
||||||
|
EncryptionAlgorithm: rned.EncryptionMetadata.EncryptionAlgorithm,
|
||||||
|
Argon2Parameters: *params,
|
||||||
|
},
|
||||||
|
Ciphertext: rned.Ciphertext,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ned, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) {
|
||||||
|
if params.Version < math.MinInt32 || params.Version > math.MaxInt32 {
|
||||||
|
return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32)
|
||||||
|
}
|
||||||
|
if params.Memory <= 0 || params.Memory > math.MaxUint32 {
|
||||||
|
return nil, fmt.Errorf("Argon2Parameters Memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
|
||||||
|
}
|
||||||
|
if params.Parallelism <= 0 || params.Parallelism > math.MaxUint8 {
|
||||||
|
return nil, fmt.Errorf("Argon2Parameters Parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
|
||||||
|
}
|
||||||
|
if params.Iterations <= 0 || params.Iterations > math.MaxUint32 {
|
||||||
|
return nil, fmt.Errorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Argon2Parameters{
|
||||||
|
version: rune(params.Version),
|
||||||
|
Memory: uint32(params.Memory),
|
||||||
|
Parallelism: uint8(params.Parallelism),
|
||||||
|
Iterations: uint32(params.Iterations),
|
||||||
|
salt: params.Salt,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecryptAndUnmarshalSigningPrivateKey will try to pem decode and decrypt an Ed25519/ECDSA private key with
|
||||||
|
// the given passphrase, returning any other bytes b or an error on failure
|
||||||
|
func DecryptAndUnmarshalSigningPrivateKey(passphrase, b []byte) (Curve, []byte, []byte, error) {
|
||||||
|
var curve Curve
|
||||||
|
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return curve, nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch k.Type {
|
||||||
|
case EncryptedEd25519PrivateKeyBanner:
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
case EncryptedECDSAP256PrivateKeyBanner:
|
||||||
|
curve = Curve_P256
|
||||||
|
default:
|
||||||
|
return curve, nil, r, fmt.Errorf("bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
|
||||||
|
}
|
||||||
|
|
||||||
|
ned, err := UnmarshalNebulaEncryptedData(k.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return curve, nil, r, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var bytes []byte
|
||||||
|
switch ned.EncryptionMetadata.EncryptionAlgorithm {
|
||||||
|
case "AES-256-GCM":
|
||||||
|
bytes, err = aes256Decrypt(passphrase, &ned.EncryptionMetadata.Argon2Parameters, ned.Ciphertext)
|
||||||
|
if err != nil {
|
||||||
|
return curve, nil, r, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return curve, nil, r, fmt.Errorf("unsupported encryption algorithm: %s", ned.EncryptionMetadata.EncryptionAlgorithm)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
if len(bytes) != ed25519.PrivateKeySize {
|
||||||
|
return curve, nil, r, fmt.Errorf("key was not %d bytes, is invalid ed25519 private key", ed25519.PrivateKeySize)
|
||||||
|
}
|
||||||
|
case Curve_P256:
|
||||||
|
if len(bytes) != 32 {
|
||||||
|
return curve, nil, r, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return curve, bytes, r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func MarshalPublicKey(curve Curve, b []byte) []byte {
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key
|
// MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key
|
||||||
func MarshalX25519PublicKey(b []byte) []byte {
|
func MarshalX25519PublicKey(b []byte) []byte {
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
||||||
@@ -188,6 +454,30 @@ func MarshalEd25519PublicKey(key ed25519.PublicKey) []byte {
|
|||||||
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key})
|
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func UnmarshalPublicKey(b []byte) ([]byte, []byte, Curve, error) {
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
var expectedLen int
|
||||||
|
var curve Curve
|
||||||
|
switch k.Type {
|
||||||
|
case X25519PublicKeyBanner:
|
||||||
|
expectedLen = 32
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
case P256PublicKeyBanner:
|
||||||
|
// Uncompressed
|
||||||
|
expectedLen = 65
|
||||||
|
curve = Curve_P256
|
||||||
|
default:
|
||||||
|
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula public key banner")
|
||||||
|
}
|
||||||
|
if len(k.Bytes) != expectedLen {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s public key", expectedLen, curve)
|
||||||
|
}
|
||||||
|
return k.Bytes, r, curve, nil
|
||||||
|
}
|
||||||
|
|
||||||
// UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b
|
// UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b
|
||||||
// or an error on failure
|
// or an error on failure
|
||||||
func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) {
|
func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) {
|
||||||
@@ -223,27 +513,86 @@ func UnmarshalEd25519PublicKey(b []byte) (ed25519.PublicKey, []byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sign signs a nebula cert with the provided private key
|
// Sign signs a nebula cert with the provided private key
|
||||||
func (nc *NebulaCertificate) Sign(key ed25519.PrivateKey) error {
|
func (nc *NebulaCertificate) Sign(curve Curve, key []byte) error {
|
||||||
|
if curve != nc.Details.Curve {
|
||||||
|
return fmt.Errorf("curve in cert and private key supplied don't match")
|
||||||
|
}
|
||||||
|
|
||||||
b, err := proto.Marshal(nc.getRawDetails())
|
b, err := proto.Marshal(nc.getRawDetails())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
sig, err := key.Sign(rand.Reader, b, crypto.Hash(0))
|
var sig []byte
|
||||||
if err != nil {
|
|
||||||
return err
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
signer := ed25519.PrivateKey(key)
|
||||||
|
sig = ed25519.Sign(signer, b)
|
||||||
|
case Curve_P256:
|
||||||
|
signer := &ecdsa.PrivateKey{
|
||||||
|
PublicKey: ecdsa.PublicKey{
|
||||||
|
Curve: elliptic.P256(),
|
||||||
|
},
|
||||||
|
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
|
||||||
|
D: new(big.Int).SetBytes(key),
|
||||||
|
}
|
||||||
|
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
|
||||||
|
signer.X, signer.Y = signer.Curve.ScalarBaseMult(key)
|
||||||
|
|
||||||
|
// We need to hash first for ECDSA
|
||||||
|
// - https://pkg.go.dev/crypto/ecdsa#SignASN1
|
||||||
|
hashed := sha256.Sum256(b)
|
||||||
|
sig, err = ecdsa.SignASN1(rand.Reader, signer, hashed[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid curve: %s", nc.Details.Curve)
|
||||||
}
|
}
|
||||||
|
|
||||||
nc.Signature = sig
|
nc.Signature = sig
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckSignature verifies the signature against the provided public key
|
// CheckSignature verifies the signature against the provided public key
|
||||||
func (nc *NebulaCertificate) CheckSignature(key ed25519.PublicKey) bool {
|
func (nc *NebulaCertificate) CheckSignature(key []byte) bool {
|
||||||
b, err := proto.Marshal(nc.getRawDetails())
|
b, err := proto.Marshal(nc.getRawDetails())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return ed25519.Verify(key, b, nc.Signature)
|
switch nc.Details.Curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return ed25519.Verify(ed25519.PublicKey(key), b, nc.Signature)
|
||||||
|
case Curve_P256:
|
||||||
|
x, y := elliptic.Unmarshal(elliptic.P256(), key)
|
||||||
|
pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
|
||||||
|
hashed := sha256.Sum256(b)
|
||||||
|
return ecdsa.VerifyASN1(pubKey, hashed[:], nc.Signature)
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: This uses an internal cache that will not be invalidated automatically
|
||||||
|
// if you manually change any fields in the NebulaCertificate.
|
||||||
|
func (nc *NebulaCertificate) checkSignatureWithCache(key []byte, useCache bool) bool {
|
||||||
|
if !useCache {
|
||||||
|
return nc.CheckSignature(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := nc.signatureVerified.Load(); v != nil {
|
||||||
|
return bytes.Equal(*v, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
verified := nc.CheckSignature(key)
|
||||||
|
if verified {
|
||||||
|
keyCopy := make([]byte, len(key))
|
||||||
|
copy(keyCopy, key)
|
||||||
|
nc.signatureVerified.Store(&keyCopy)
|
||||||
|
}
|
||||||
|
|
||||||
|
return verified
|
||||||
}
|
}
|
||||||
|
|
||||||
// Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false
|
// Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false
|
||||||
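The P256 branch above hashes the marshalled details before signing; here is a standalone stdlib sketch of that sign/verify shape (the payload is a placeholder, not nebula's real details encoding):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	details := []byte("marshalled certificate details")
	hashed := sha256.Sum256(details) // ECDSA signs a digest, not the raw bytes

	sig, err := ecdsa.SignASN1(rand.Reader, key, hashed[:])
	if err != nil {
		panic(err)
	}

	fmt.Println("signature ok:", ecdsa.VerifyASN1(&key.PublicKey, hashed[:], sig))
}
```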
@@ -253,8 +602,27 @@ func (nc *NebulaCertificate) Expired(t time.Time) bool {
|
|||||||
|
|
||||||
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
||||||
if ncp.IsBlocklisted(nc) {
|
return nc.verify(t, ncp, false)
|
||||||
return false, fmt.Errorf("certificate has been blocked")
|
}
|
||||||
|
|
||||||
|
// VerifyWithCache will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
|
//
|
||||||
|
// NOTE: This uses an internal cache that will not be invalidated automatically
|
||||||
|
// if you manually change any fields in the NebulaCertificate.
|
||||||
|
func (nc *NebulaCertificate) VerifyWithCache(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
||||||
|
return nc.verify(t, ncp, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResetCache resets the cache used by VerifyWithCache.
|
||||||
|
func (nc *NebulaCertificate) ResetCache() {
|
||||||
|
nc.sha256sum.Store(nil)
|
||||||
|
nc.signatureVerified.Store(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
|
func (nc *NebulaCertificate) verify(t time.Time, ncp *NebulaCAPool, useCache bool) (bool, error) {
|
||||||
|
if ncp.isBlocklistedWithCache(nc, useCache) {
|
||||||
|
return false, ErrBlockListed
|
||||||
}
|
}
|
||||||
|
|
||||||
signer, err := ncp.GetCAForCert(nc)
|
signer, err := ncp.GetCAForCert(nc)
|
||||||
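A minimal sketch of how the cached path added above is intended to be called (the certificate and CA pool stand in for whatever the caller already holds; only `VerifyWithCache` and `ResetCache` are taken from this diff):

```go
// Illustrative only: assumes the cert API exactly as added in this diff.
package example

import (
	"time"

	"github.com/slackhq/nebula/cert"
)

// verifyPeer stands in for a hot path that re-checks the same remote
// certificate many times (nc and caPool are whatever the caller already holds).
func verifyPeer(nc *cert.NebulaCertificate, caPool *cert.NebulaCAPool) error {
	// VerifyWithCache reuses the cached sha256 sum and signature verification
	// when the same certificate object is checked repeatedly.
	if ok, err := nc.VerifyWithCache(time.Now(), caPool); !ok {
		return err
	}

	// If any Details fields are mutated later, the cache must be dropped by hand:
	// nc.ResetCache()
	return nil
}
```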
@@ -263,15 +631,15 @@ func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error
|
|||||||
}
|
}
|
||||||
|
|
||||||
if signer.Expired(t) {
|
if signer.Expired(t) {
|
||||||
return false, fmt.Errorf("root certificate is expired")
|
return false, ErrRootExpired
|
||||||
}
|
}
|
||||||
|
|
||||||
if nc.Expired(t) {
|
if nc.Expired(t) {
|
||||||
return false, fmt.Errorf("certificate is expired")
|
return false, ErrExpired
|
||||||
}
|
}
|
||||||
|
|
||||||
if !nc.CheckSignature(signer.Details.PublicKey) {
|
if !nc.checkSignatureWithCache(signer.Details.PublicKey, useCache) {
|
||||||
return false, fmt.Errorf("certificate signature did not match")
|
return false, ErrSignatureMismatch
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := nc.CheckRootConstrains(signer); err != nil {
|
if err := nc.CheckRootConstrains(signer); err != nil {
|
||||||
@@ -324,22 +692,52 @@ func (nc *NebulaCertificate) CheckRootConstrains(signer *NebulaCertificate) erro
 	}
 }
 
 // VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
-func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
+func (nc *NebulaCertificate) VerifyPrivateKey(curve Curve, key []byte) error {
+	if curve != nc.Details.Curve {
+		return fmt.Errorf("curve in cert and private key supplied don't match")
+	}
 	if nc.Details.IsCA {
-		// the call to PublicKey below will panic slice bounds out of range otherwise
-		if len(key) != ed25519.PrivateKeySize {
-			return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
-		}
+		switch curve {
+		case Curve_CURVE25519:
+			// the call to PublicKey below will panic slice bounds out of range otherwise
+			if len(key) != ed25519.PrivateKeySize {
+				return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
+			}
 
-		if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
-			return fmt.Errorf("public key in cert and private key supplied don't match")
+			if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
+				return fmt.Errorf("public key in cert and private key supplied don't match")
+			}
+		case Curve_P256:
+			privkey, err := ecdh.P256().NewPrivateKey(key)
+			if err != nil {
+				return fmt.Errorf("cannot parse private key as P256")
+			}
+			pub := privkey.PublicKey().Bytes()
+			if !bytes.Equal(pub, nc.Details.PublicKey) {
+				return fmt.Errorf("public key in cert and private key supplied don't match")
+			}
+		default:
+			return fmt.Errorf("invalid curve: %s", curve)
 		}
 		return nil
 	}
 
-	pub, err := curve25519.X25519(key, curve25519.Basepoint)
-	if err != nil {
-		return err
+	var pub []byte
+	switch curve {
+	case Curve_CURVE25519:
+		var err error
+		pub, err = curve25519.X25519(key, curve25519.Basepoint)
+		if err != nil {
+			return err
+		}
+	case Curve_P256:
+		privkey, err := ecdh.P256().NewPrivateKey(key)
+		if err != nil {
+			return err
+		}
+		pub = privkey.PublicKey().Bytes()
+	default:
+		return fmt.Errorf("invalid curve: %s", curve)
 	}
 	if !bytes.Equal(pub, nc.Details.PublicKey) {
 		return fmt.Errorf("public key in cert and private key supplied don't match")
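Note: a sketch of checking a raw P256 scalar against a Curve_P256 certificate with the new two-argument VerifyPrivateKey; the 32-byte FillBytes encoding mirrors what the tests below use (assumes crypto/ecdsa, crypto/elliptic, crypto/rand and the cert package are imported).

// checkP256Key generates a throwaway P256 key and shows the raw-scalar encoding
// VerifyPrivateKey expects; for an unrelated cert this returns a mismatch error.
func checkP256Key(nc *cert.NebulaCertificate) error {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	rawPriv := priv.D.FillBytes(make([]byte, 32)) // raw scalar, not DER/PEM
	// Fails with "curve in cert and private key supplied don't match" when nc is
	// a CURVE25519 cert, or with a public-key mismatch for an unrelated P256 cert.
	return nc.VerifyPrivateKey(cert.Curve_P256, rawPriv)
}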
@@ -393,6 +791,7 @@ func (nc *NebulaCertificate) String() string {
 	s += fmt.Sprintf("\t\tIs CA: %v\n", nc.Details.IsCA)
 	s += fmt.Sprintf("\t\tIssuer: %s\n", nc.Details.Issuer)
 	s += fmt.Sprintf("\t\tPublic key: %x\n", nc.Details.PublicKey)
+	s += fmt.Sprintf("\t\tCurve: %s\n", nc.Details.Curve)
 	s += "\t}\n"
 	fp, err := nc.Sha256Sum()
 	if err == nil {
@@ -413,6 +812,7 @@ func (nc *NebulaCertificate) getRawDetails() *RawNebulaCertificateDetails {
 		NotAfter:  nc.Details.NotAfter.Unix(),
 		PublicKey: make([]byte, len(nc.Details.PublicKey)),
 		IsCA:      nc.Details.IsCA,
+		Curve:     nc.Details.Curve,
 	}
 
 	for _, ipNet := range nc.Details.Ips {
@@ -461,6 +861,25 @@ func (nc *NebulaCertificate) Sha256Sum() (string, error) {
 	return hex.EncodeToString(sum[:]), nil
 }
 
+// NOTE: This uses an internal cache that will not be invalidated automatically
+// if you manually change any fields in the NebulaCertificate.
+func (nc *NebulaCertificate) sha256SumWithCache(useCache bool) (string, error) {
+	if !useCache {
+		return nc.Sha256Sum()
+	}
+
+	if s := nc.sha256sum.Load(); s != nil {
+		return *s, nil
+	}
+	s, err := nc.Sha256Sum()
+	if err != nil {
+		return s, err
+	}
+
+	nc.sha256sum.Store(&s)
+	return s, nil
+}
+
 func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
 	toString := func(ips []*net.IPNet) []string {
 		s := []string{}
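Note: the cached fingerprint is the same value used for blocklisting. A short sketch of blocking a certificate by fingerprint with the CAPool helpers exercised in the tests further down (illustrative only).

// blockCert adds nc's SHA-256 fingerprint to the pool blocklist, after which
// Verify/VerifyWithCache return ErrBlockListed until ResetCertBlocklist is called.
func blockCert(nc *cert.NebulaCertificate, caPool *cert.NebulaCAPool) error {
	fp, err := nc.Sha256Sum() // hex-encoded SHA-256 of the marshalled certificate
	if err != nil {
		return err
	}
	caPool.BlocklistFingerprint(fp)
	return nil
}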
@@ -482,6 +901,7 @@ func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
 			"publicKey": fmt.Sprintf("%x", nc.Details.PublicKey),
 			"isCa":      nc.Details.IsCA,
 			"issuer":    nc.Details.Issuer,
+			"curve":     nc.Details.Curve.String(),
 		},
 		"fingerprint": fp,
 		"signature":   fmt.Sprintf("%x", nc.Signature),
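Note: the JSON form now carries the curve name; a tiny sketch of reading it back out (assumes encoding/json is imported).

// certCurveFromJSON round-trips a certificate through MarshalJSON and pulls the
// new "curve" field back out, e.g. "CURVE25519" or "P256".
func certCurveFromJSON(nc *cert.NebulaCertificate) (string, error) {
	b, err := json.Marshal(nc)
	if err != nil {
		return "", err
	}
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return "", err
	}
	details := m["details"].(map[string]interface{})
	return details["curve"].(string), nil
}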
356	cert/cert.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.28.0
-// 	protoc        v3.20.0
+// 	protoc-gen-go v1.30.0
+// 	protoc        v3.21.5
 // source: cert.proto
 
 package cert
@@ -20,6 +20,52 @@ const (
 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )
 
+type Curve int32
+
+const (
+	Curve_CURVE25519 Curve = 0
+	Curve_P256       Curve = 1
+)
+
+// Enum value maps for Curve.
+var (
+	Curve_name = map[int32]string{
+		0: "CURVE25519",
+		1: "P256",
+	}
+	Curve_value = map[string]int32{
+		"CURVE25519": 0,
+		"P256":       1,
+	}
+)
+
+func (x Curve) Enum() *Curve {
+	p := new(Curve)
+	*p = x
+	return p
+}
+
+func (x Curve) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Curve) Descriptor() protoreflect.EnumDescriptor {
+	return file_cert_proto_enumTypes[0].Descriptor()
+}
+
+func (Curve) Type() protoreflect.EnumType {
+	return &file_cert_proto_enumTypes[0]
+}
+
+func (x Curve) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Curve.Descriptor instead.
+func (Curve) EnumDescriptor() ([]byte, []int) {
+	return file_cert_proto_rawDescGZIP(), []int{0}
+}
+
 type RawNebulaCertificate struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
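Note: the generated Curve_name/Curve_value maps are what make config or CLI input translatable to the enum; a small illustrative sketch (the helper name is an assumption, not part of this change).

// parseCurve converts a user supplied curve name ("CURVE25519" or "P256") into
// the generated enum, falling back to an error for anything unknown.
func parseCurve(name string) (cert.Curve, error) {
	v, ok := cert.Curve_value[name]
	if !ok {
		return cert.Curve_CURVE25519, fmt.Errorf("unknown curve %q", name)
	}
	return cert.Curve(v), nil
}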
@@ -91,6 +137,7 @@ type RawNebulaCertificateDetails struct {
 	IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"`
 	// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
 	Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"`
+	Curve  Curve  `protobuf:"varint,100,opt,name=curve,proto3,enum=cert.Curve" json:"curve,omitempty"`
 }
 
 func (x *RawNebulaCertificateDetails) Reset() {
@@ -188,6 +235,202 @@ func (x *RawNebulaCertificateDetails) GetIssuer() []byte {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificateDetails) GetCurve() Curve {
|
||||||
|
if x != nil {
|
||||||
|
return x.Curve
|
||||||
|
}
|
||||||
|
return Curve_CURVE25519
|
||||||
|
}
|
||||||
|
|
||||||
|
type RawNebulaEncryptedData struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
EncryptionMetadata *RawNebulaEncryptionMetadata `protobuf:"bytes,1,opt,name=EncryptionMetadata,proto3" json:"EncryptionMetadata,omitempty"`
|
||||||
|
Ciphertext []byte `protobuf:"bytes,2,opt,name=Ciphertext,proto3" json:"Ciphertext,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) Reset() {
|
||||||
|
*x = RawNebulaEncryptedData{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[2]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RawNebulaEncryptedData) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[2]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaEncryptedData.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RawNebulaEncryptedData) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) GetEncryptionMetadata() *RawNebulaEncryptionMetadata {
|
||||||
|
if x != nil {
|
||||||
|
return x.EncryptionMetadata
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) GetCiphertext() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.Ciphertext
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type RawNebulaEncryptionMetadata struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=EncryptionAlgorithm,proto3" json:"EncryptionAlgorithm,omitempty"`
|
||||||
|
Argon2Parameters *RawNebulaArgon2Parameters `protobuf:"bytes,2,opt,name=Argon2Parameters,proto3" json:"Argon2Parameters,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) Reset() {
|
||||||
|
*x = RawNebulaEncryptionMetadata{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[3]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RawNebulaEncryptionMetadata) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[3]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaEncryptionMetadata.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RawNebulaEncryptionMetadata) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{3}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) GetEncryptionAlgorithm() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.EncryptionAlgorithm
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) GetArgon2Parameters() *RawNebulaArgon2Parameters {
|
||||||
|
if x != nil {
|
||||||
|
return x.Argon2Parameters
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type RawNebulaArgon2Parameters struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // rune in Go
|
||||||
|
Memory uint32 `protobuf:"varint,2,opt,name=memory,proto3" json:"memory,omitempty"`
|
||||||
|
Parallelism uint32 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"` // uint8 in Go
|
||||||
|
Iterations uint32 `protobuf:"varint,3,opt,name=iterations,proto3" json:"iterations,omitempty"`
|
||||||
|
Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) Reset() {
|
||||||
|
*x = RawNebulaArgon2Parameters{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[4]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RawNebulaArgon2Parameters) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[4]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaArgon2Parameters.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RawNebulaArgon2Parameters) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{4}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetVersion() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Version
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetMemory() uint32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Memory
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetParallelism() uint32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Parallelism
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetIterations() uint32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Iterations
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetSalt() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.Salt
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
var File_cert_proto protoreflect.FileDescriptor
|
var File_cert_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
var file_cert_proto_rawDesc = []byte{
|
var file_cert_proto_rawDesc = []byte{
|
||||||
@@ -199,7 +442,7 @@ var file_cert_proto_rawDesc = []byte{
|
|||||||
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
|
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
|
||||||
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
|
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
|
||||||
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
|
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
|
||||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
||||||
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
|
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
|
||||||
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
||||||
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
|
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
|
||||||
@@ -215,9 +458,43 @@ var file_cert_proto_rawDesc = []byte{
|
|||||||
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
|
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
|
||||||
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
|
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
|
||||||
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
|
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
|
||||||
0x72, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
0x72, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0e,
|
||||||
0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71, 0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63,
|
0x32, 0x0b, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x75, 0x72, 0x76, 0x65, 0x52, 0x05, 0x63,
|
||||||
0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
0x75, 0x72, 0x76, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
|
||||||
|
0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
|
||||||
|
0x51, 0x0a, 0x12, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74,
|
||||||
|
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
|
||||||
|
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72,
|
||||||
|
0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12,
|
||||||
|
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||||
|
0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74,
|
||||||
|
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
|
||||||
|
0x78, 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61,
|
||||||
|
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||||
|
0x74, 0x61, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||||
|
0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
|
||||||
|
0x69, 0x74, 0x68, 0x6d, 0x12, 0x4b, 0x0a, 0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61,
|
||||||
|
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
|
||||||
|
0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
|
||||||
|
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52,
|
||||||
|
0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
|
||||||
|
0x73, 0x22, 0xa3, 0x01, 0x0a, 0x19, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
|
||||||
|
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12,
|
||||||
|
0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||||
|
0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d,
|
||||||
|
0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72,
|
||||||
|
0x79, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d,
|
||||||
|
0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c,
|
||||||
|
0x69, 0x73, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||||
|
0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||||
|
0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x2a, 0x21, 0x0a, 0x05, 0x43, 0x75, 0x72, 0x76, 0x65,
|
||||||
|
0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x55, 0x52, 0x56, 0x45, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x00,
|
||||||
|
0x12, 0x08, 0x0a, 0x04, 0x50, 0x32, 0x35, 0x36, 0x10, 0x01, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69,
|
||||||
|
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71,
|
||||||
|
0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72,
|
||||||
|
0x6f, 0x74, 0x6f, 0x33,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -232,18 +509,26 @@ func file_cert_proto_rawDescGZIP() []byte {
 	return file_cert_proto_rawDescData
 }
 
-var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cert_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
 var file_cert_proto_goTypes = []interface{}{
-	(*RawNebulaCertificate)(nil),        // 0: cert.RawNebulaCertificate
-	(*RawNebulaCertificateDetails)(nil), // 1: cert.RawNebulaCertificateDetails
+	(Curve)(0),                          // 0: cert.Curve
+	(*RawNebulaCertificate)(nil),        // 1: cert.RawNebulaCertificate
+	(*RawNebulaCertificateDetails)(nil), // 2: cert.RawNebulaCertificateDetails
+	(*RawNebulaEncryptedData)(nil),      // 3: cert.RawNebulaEncryptedData
+	(*RawNebulaEncryptionMetadata)(nil), // 4: cert.RawNebulaEncryptionMetadata
+	(*RawNebulaArgon2Parameters)(nil),   // 5: cert.RawNebulaArgon2Parameters
 }
 var file_cert_proto_depIdxs = []int32{
-	1, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
-	1, // [1:1] is the sub-list for method output_type
-	1, // [1:1] is the sub-list for method input_type
-	1, // [1:1] is the sub-list for extension type_name
-	1, // [1:1] is the sub-list for extension extendee
-	0, // [0:1] is the sub-list for field type_name
+	2, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
+	0, // 1: cert.RawNebulaCertificateDetails.curve:type_name -> cert.Curve
+	4, // 2: cert.RawNebulaEncryptedData.EncryptionMetadata:type_name -> cert.RawNebulaEncryptionMetadata
+	5, // 3: cert.RawNebulaEncryptionMetadata.Argon2Parameters:type_name -> cert.RawNebulaArgon2Parameters
+	4, // [4:4] is the sub-list for method output_type
+	4, // [4:4] is the sub-list for method input_type
+	4, // [4:4] is the sub-list for extension type_name
+	4, // [4:4] is the sub-list for extension extendee
+	0, // [0:4] is the sub-list for field type_name
 }
 
 func init() { file_cert_proto_init() }
@@ -276,19 +561,56 @@ func file_cert_proto_init() {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
file_cert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaEncryptedData); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_cert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaEncryptionMetadata); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_cert_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaArgon2Parameters); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
type x struct{}
|
type x struct{}
|
||||||
out := protoimpl.TypeBuilder{
|
out := protoimpl.TypeBuilder{
|
||||||
File: protoimpl.DescBuilder{
|
File: protoimpl.DescBuilder{
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
RawDescriptor: file_cert_proto_rawDesc,
|
RawDescriptor: file_cert_proto_rawDesc,
|
||||||
NumEnums: 0,
|
NumEnums: 1,
|
||||||
NumMessages: 2,
|
NumMessages: 5,
|
||||||
NumExtensions: 0,
|
NumExtensions: 0,
|
||||||
NumServices: 0,
|
NumServices: 0,
|
||||||
},
|
},
|
||||||
GoTypes: file_cert_proto_goTypes,
|
GoTypes: file_cert_proto_goTypes,
|
||||||
DependencyIndexes: file_cert_proto_depIdxs,
|
DependencyIndexes: file_cert_proto_depIdxs,
|
||||||
|
EnumInfos: file_cert_proto_enumTypes,
|
||||||
MessageInfos: file_cert_proto_msgTypes,
|
MessageInfos: file_cert_proto_msgTypes,
|
||||||
}.Build()
|
}.Build()
|
||||||
File_cert_proto = out.File
|
File_cert_proto = out.File
|
||||||
cert/cert.proto

@@ -5,6 +5,11 @@ option go_package = "github.com/slackhq/nebula/cert";
 
 //import "google/protobuf/timestamp.proto";
 
+enum Curve {
+  CURVE25519 = 0;
+  P256 = 1;
+}
+
 message RawNebulaCertificate {
   RawNebulaCertificateDetails Details = 1;
   bytes Signature = 2;
@@ -26,4 +31,24 @@ message RawNebulaCertificateDetails {
 
   // sha-256 of the issuer certificate, if this field is blank the cert is self-signed
   bytes Issuer = 9;
-}
+
+  Curve curve = 100;
+}
+
+message RawNebulaEncryptedData {
+  RawNebulaEncryptionMetadata EncryptionMetadata = 1;
+  bytes Ciphertext = 2;
+}
+
+message RawNebulaEncryptionMetadata {
+  string EncryptionAlgorithm = 1;
+  RawNebulaArgon2Parameters Argon2Parameters = 2;
+}
+
+message RawNebulaArgon2Parameters {
+  int32 version = 1; // rune in Go
+  uint32 memory = 2;
+  uint32 parallelism = 4; // uint8 in Go
+  uint32 iterations = 3;
+  bytes salt = 5;
+}
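Note: these messages are the wire format behind the encrypted signing-key PEM blocks tested below. A hedged round-trip sketch using the helpers that appear in those tests; the argument order of NewArgon2Parameters is assumed to be memory, parallelism, iterations.

// roundTripEncryptedKey encrypts a 64-byte ed25519 signing key under a passphrase
// and immediately decrypts it again, as TestEncryptAndMarshalSigningPrivateKey does.
func roundTripEncryptedKey(rawKey, passphrase []byte) ([]byte, error) {
	kdf := cert.NewArgon2Parameters(64*1024, 4, 3) // assumed: memory KiB, parallelism, iterations
	pemBytes, err := cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, rawKey, passphrase, kdf)
	if err != nil {
		return nil, err
	}
	curve, key, _, err := cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, pemBytes)
	if err != nil {
		return nil, err // e.g. "invalid passphrase or corrupt private key"
	}
	_ = curve // Curve_CURVE25519 for an ed25519 signing key
	return key, nil
}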
cert/cert_test.go
@@ -1,6 +1,9 @@
 package cert
 
 import (
+	"crypto/ecdh"
+	"crypto/ecdsa"
+	"crypto/elliptic"
 	"crypto/rand"
 	"fmt"
 	"io"
@@ -101,7 +104,49 @@ func TestNebulaCertificate_Sign(t *testing.T) {
 	pub, priv, err := ed25519.GenerateKey(rand.Reader)
 	assert.Nil(t, err)
 	assert.False(t, nc.CheckSignature(pub))
-	assert.Nil(t, nc.Sign(priv))
+	assert.Nil(t, nc.Sign(Curve_CURVE25519, priv))
+	assert.True(t, nc.CheckSignature(pub))
+
+	_, err = nc.Marshal()
+	assert.Nil(t, err)
+	//t.Log("Cert size:", len(b))
+}
+
+func TestNebulaCertificate_SignP256(t *testing.T) {
+	before := time.Now().Add(time.Second * -60).Round(time.Second)
+	after := time.Now().Add(time.Second * 60).Round(time.Second)
+	pubKey := []byte("01234567890abcedfghij1234567890ab1234567890abcedfghij1234567890ab")
+
+	nc := NebulaCertificate{
+		Details: NebulaCertificateDetails{
+			Name: "testing",
+			Ips: []*net.IPNet{
+				{IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
+				{IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
+				{IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
+			},
+			Subnets: []*net.IPNet{
+				{IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
+				{IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
+				{IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
+			},
+			Groups:    []string{"test-group1", "test-group2", "test-group3"},
+			NotBefore: before,
+			NotAfter:  after,
+			PublicKey: pubKey,
+			IsCA:      false,
+			Curve:     Curve_P256,
+			Issuer:    "1234567890abcedfghij1234567890ab",
+		},
+	}
+
+	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
+	rawPriv := priv.D.FillBytes(make([]byte, 32))
+
+	assert.Nil(t, err)
+	assert.False(t, nc.CheckSignature(pub))
+	assert.Nil(t, nc.Sign(Curve_P256, rawPriv))
 	assert.True(t, nc.CheckSignature(pub))
 
 	_, err = nc.Marshal()
@@ -153,7 +198,7 @@ func TestNebulaCertificate_MarshalJSON(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(
 		t,
-		"{\"details\":{\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}",
+		"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}",
 		string(b),
 	)
 }
@@ -177,7 +222,7 @@ func TestNebulaCertificate_Verify(t *testing.T) {
 
 	v, err := c.Verify(time.Now(), caPool)
 	assert.False(t, v)
-	assert.EqualError(t, err, "certificate has been blocked")
+	assert.EqualError(t, err, "certificate is in the block list")
 
 	caPool.ResetCertBlocklist()
 	v, err = c.Verify(time.Now(), caPool)
@@ -217,6 +262,65 @@ func TestNebulaCertificate_Verify(t *testing.T) {
 	assert.Nil(t, err)
 }
 
+func TestNebulaCertificate_VerifyP256(t *testing.T) {
+	ca, _, caKey, err := newTestCaCertP256(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+	assert.Nil(t, err)
+
+	c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+	assert.Nil(t, err)
+
+	h, err := ca.Sha256Sum()
+	assert.Nil(t, err)
+
+	caPool := NewCAPool()
+	caPool.CAs[h] = ca
+
+	f, err := c.Sha256Sum()
+	assert.Nil(t, err)
+	caPool.BlocklistFingerprint(f)
+
+	v, err := c.Verify(time.Now(), caPool)
+	assert.False(t, v)
+	assert.EqualError(t, err, "certificate is in the block list")
+
+	caPool.ResetCertBlocklist()
+	v, err = c.Verify(time.Now(), caPool)
+	assert.True(t, v)
+	assert.Nil(t, err)
+
+	v, err = c.Verify(time.Now().Add(time.Hour*1000), caPool)
+	assert.False(t, v)
+	assert.EqualError(t, err, "root certificate is expired")
+
+	c, _, _, err = newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
+	assert.Nil(t, err)
+	v, err = c.Verify(time.Now().Add(time.Minute*6), caPool)
+	assert.False(t, v)
+	assert.EqualError(t, err, "certificate is expired")
+
+	// Test group assertion
+	ca, _, caKey, err = newTestCaCertP256(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "test2"})
+	assert.Nil(t, err)
+
+	caPem, err := ca.MarshalToPEM()
+	assert.Nil(t, err)
+
+	caPool = NewCAPool()
+	caPool.AddCACertificate(caPem)
+
+	c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "bad"})
+	assert.Nil(t, err)
+	v, err = c.Verify(time.Now(), caPool)
+	assert.False(t, v)
+	assert.EqualError(t, err, "certificate contained a group not present on the signing ca: bad")
+
+	c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1"})
+	assert.Nil(t, err)
+	v, err = c.Verify(time.Now(), caPool)
+	assert.True(t, v)
+	assert.Nil(t, err)
+}
+
 func TestNebulaCertificate_Verify_IPs(t *testing.T) {
 	_, caIp1, _ := net.ParseCIDR("10.0.0.0/16")
 	_, caIp2, _ := net.ParseCIDR("192.168.0.0/24")
@@ -378,20 +482,40 @@ func TestNebulaCertificate_Verify_Subnets(t *testing.T) {
 func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) {
 	ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
 	assert.Nil(t, err)
-	err = ca.VerifyPrivateKey(caKey)
+	err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
 	assert.Nil(t, err)
 
 	_, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
 	assert.Nil(t, err)
-	err = ca.VerifyPrivateKey(caKey2)
+	err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
 	assert.NotNil(t, err)
 
 	c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
-	err = c.VerifyPrivateKey(priv)
+	err = c.VerifyPrivateKey(Curve_CURVE25519, priv)
 	assert.Nil(t, err)
 
 	_, priv2 := x25519Keypair()
-	err = c.VerifyPrivateKey(priv2)
+	err = c.VerifyPrivateKey(Curve_CURVE25519, priv2)
+	assert.NotNil(t, err)
+}
+
+func TestNebulaCertificate_VerifyPrivateKeyP256(t *testing.T) {
+	ca, _, caKey, err := newTestCaCertP256(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
+	assert.Nil(t, err)
+	err = ca.VerifyPrivateKey(Curve_P256, caKey)
+	assert.Nil(t, err)
+
+	_, _, caKey2, err := newTestCaCertP256(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
+	assert.Nil(t, err)
+	err = ca.VerifyPrivateKey(Curve_P256, caKey2)
+	assert.NotNil(t, err)
+
+	c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
+	err = c.VerifyPrivateKey(Curve_P256, priv)
+	assert.Nil(t, err)
+
+	_, priv2 := p256Keypair()
+	err = c.VerifyPrivateKey(Curve_P256, priv2)
 	assert.NotNil(t, err)
 }
 
@@ -438,6 +562,16 @@ CjkKB2V4cGlyZWQouPmWjQYwufmWjQY6ILCRaoCkJlqHgv5jfDN4lzLHBvDzaQm4
 vZxfu144hmgjQAESQG4qlnZi8DncvD/LDZnLgJHOaX1DWCHHEh59epVsC+BNgTie
 WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
 -----END NEBULA CERTIFICATE-----
+`
+
+	p256 := `
+# p256 certificate
+-----BEGIN NEBULA CERTIFICATE-----
+CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
+6tctO9sPT3jOx8ES6M1nIqOhpTmZeabF/4rELDqPV4aH5jfJut798DUXql0FlF8H
+76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
+IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
+-----END NEBULA CERTIFICATE-----
 `
 
 	rootCA := NebulaCertificate{
@@ -452,6 +586,12 @@ WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
 		},
 	}
 
+	rootCAP256 := NebulaCertificate{
+		Details: NebulaCertificateDetails{
+			Name: "nebula P256 test",
+		},
+	}
+
 	p, err := NewCAPoolFromBytes([]byte(noNewLines))
 	assert.Nil(t, err)
 	assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
@@ -474,6 +614,11 @@ WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
 	assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 	assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
 	assert.Equal(t, len(pppp.CAs), 3)
+
+	ppppp, err := NewCAPoolFromBytes([]byte(p256))
+	assert.Nil(t, err)
+	assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
+	assert.Equal(t, len(ppppp.CAs), 1)
 }
 
 func appendByteSlices(b ...[]byte) []byte {
@@ -529,11 +674,16 @@ bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
 	assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
 }
 
-func TestUnmarshalEd25519PrivateKey(t *testing.T) {
+func TestUnmarshalSigningPrivateKey(t *testing.T) {
 	privKey := []byte(`# A good key
 -----BEGIN NEBULA ED25519 PRIVATE KEY-----
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
 -----END NEBULA ED25519 PRIVATE KEY-----
+`)
+	privP256Key := []byte(`# A good key
+-----BEGIN NEBULA ECDSA P256 PRIVATE KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+-----END NEBULA ECDSA P256 PRIVATE KEY-----
 `)
 	shortKey := []byte(`# A short key
 -----BEGIN NEBULA ED25519 PRIVATE KEY-----
@@ -550,39 +700,139 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
 -END NEBULA ED25519 PRIVATE KEY-----`)
 
-	keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
+	keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
 
 	// Success test case
-	k, rest, err := UnmarshalEd25519PrivateKey(keyBundle)
+	k, rest, curve, err := UnmarshalSigningPrivateKey(keyBundle)
 	assert.Len(t, k, 64)
+	assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_CURVE25519, curve)
+	assert.Nil(t, err)
+
+	// Success test case
+	k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
+	assert.Len(t, k, 32)
 	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_P256, curve)
 	assert.Nil(t, err)
 
 	// Fail due to short key
-	k, rest, err = UnmarshalEd25519PrivateKey(rest)
+	k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
-	assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
+	assert.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key")
 
 	// Fail due to invalid banner
-	k, rest, err = UnmarshalEd25519PrivateKey(rest)
+	k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, invalidPem)
-	assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 private key banner")
+	assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519/ECDSA private key banner")
 
 	// Fail due to ivalid PEM format, because
 	// it's missing the requisite pre-encapsulation boundary.
-	k, rest, err = UnmarshalEd25519PrivateKey(rest)
+	k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, invalidPem)
 	assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
 }
 
-func TestUnmarshalX25519PrivateKey(t *testing.T) {
+func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
+	passphrase := []byte("DO NOT USE THIS KEY")
+	privKey := []byte(`# A good key
+-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
+CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
+oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
++Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
+qrlJ69wer3ZUHFXA
+-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
+`)
+	shortKey := []byte(`# A key which, once decrypted, is too short
+-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
+CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
+k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
+GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
+rQr3bdH3Oy/WiYU=
+-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
+`)
+	invalidBanner := []byte(`# Invalid banner (not encrypted)
+-----BEGIN NEBULA ED25519 PRIVATE KEY-----
+bWRp2CTVFhW9HD/qCd28ltDgK3w8VXSeaEYczDWos8sMUBqDb9jP3+NYwcS4lURG
+XgLvodMXZJuaFPssp+WwtA==
+-----END NEBULA ED25519 PRIVATE KEY-----
+`)
+	invalidPem := []byte(`# Not a valid PEM format
+-BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
+CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
+oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
++Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
+qrlJ69wer3ZUHFXA
+-END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
+`)
+
+	keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
+
+	// Success test case
+	curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, keyBundle)
+	assert.Nil(t, err)
+	assert.Equal(t, Curve_CURVE25519, curve)
+	assert.Len(t, k, 64)
+	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
+
+	// Fail due to short key
+	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
+	assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
+	assert.Nil(t, k)
+	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
+
+	// Fail due to invalid banner
+	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
+	assert.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
+	assert.Nil(t, k)
+	assert.Equal(t, rest, invalidPem)
+
+	// Fail due to ivalid PEM format, because
+	// it's missing the requisite pre-encapsulation boundary.
+	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
+	assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
+	assert.Nil(t, k)
+	assert.Equal(t, rest, invalidPem)
+
+	// Fail due to invalid passphrase
+	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey)
+	assert.EqualError(t, err, "invalid passphrase or corrupt private key")
+	assert.Nil(t, k)
+	assert.Equal(t, rest, []byte{})
+}
+
+func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) {
+	// Having proved that decryption works correctly above, we can test the
+	// encryption function produces a value which can be decrypted
+	passphrase := []byte("passphrase")
+	bytes := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+	kdfParams := NewArgon2Parameters(64*1024, 4, 3)
+	key, err := EncryptAndMarshalSigningPrivateKey(Curve_CURVE25519, bytes, passphrase, kdfParams)
+	assert.Nil(t, err)
+
+	// Verify the "key" can be decrypted successfully
+	curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, key)
+	assert.Len(t, k, 64)
+	assert.Equal(t, Curve_CURVE25519, curve)
+	assert.Equal(t, rest, []byte{})
+	assert.Nil(t, err)
+
+	// EncryptAndMarshalEd25519PrivateKey does not create any errors itself
+}
+
+func TestUnmarshalPrivateKey(t *testing.T) {
 	privKey := []byte(`# A good key
 -----BEGIN NEBULA X25519 PRIVATE KEY-----
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 -----END NEBULA X25519 PRIVATE KEY-----
+`)
+	privP256Key := []byte(`# A good key
+-----BEGIN NEBULA P256 PRIVATE KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+-----END NEBULA P256 PRIVATE KEY-----
 `)
 	shortKey := []byte(`# A short key
 -----BEGIN NEBULA X25519 PRIVATE KEY-----
@@ -599,29 +849,37 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 -END NEBULA X25519 PRIVATE KEY-----`)
 
-	keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
+	keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
 
 	// Success test case
-	k, rest, err := UnmarshalX25519PrivateKey(keyBundle)
+	k, rest, curve, err := UnmarshalPrivateKey(keyBundle)
+	assert.Len(t, k, 32)
+	assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_CURVE25519, curve)
+	assert.Nil(t, err)
+
+	// Success test case
+	k, rest, curve, err = UnmarshalPrivateKey(rest)
 	assert.Len(t, k, 32)
 	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_P256, curve)
 	assert.Nil(t, err)
 
 	// Fail due to short key
-	k, rest, err = UnmarshalX25519PrivateKey(rest)
+	k, rest, curve, err = UnmarshalPrivateKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
-	assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 private key")
+	assert.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key")
 
 	// Fail due to invalid banner
-	k, rest, err = UnmarshalX25519PrivateKey(rest)
+	k, rest, curve, err = UnmarshalPrivateKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, invalidPem)
-	assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 private key banner")
+	assert.EqualError(t, err, "bytes did not contain a proper nebula private key banner")
 
 	// Fail due to ivalid PEM format, because
 	// it's missing the requisite pre-encapsulation boundary.
-	k, rest, err = UnmarshalX25519PrivateKey(rest)
+	k, rest, curve, err = UnmarshalPrivateKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, invalidPem)
 	assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
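Note: the consolidated UnmarshalPrivateKey walks a PEM bundle block by block and reports each key's curve; a small illustrative sketch of draining a mixed bundle from an importing package.

// listPrivateKeys parses every private key block in a concatenated PEM bundle,
// returning one curve per successfully parsed 32-byte key.
func listPrivateKeys(bundle []byte) ([]cert.Curve, error) {
	var curves []cert.Curve
	rest := bundle
	for len(rest) > 0 {
		_, next, curve, err := cert.UnmarshalPrivateKey(rest)
		if err != nil {
			return curves, err
		}
		curves = append(curves, curve)
		rest = next
	}
	return curves, nil
}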
@@ -681,6 +939,12 @@ func TestUnmarshalX25519PublicKey(t *testing.T) {
 -----BEGIN NEBULA X25519 PUBLIC KEY-----
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 -----END NEBULA X25519 PUBLIC KEY-----
+`)
+	pubP256Key := []byte(`# A good key
+-----BEGIN NEBULA P256 PUBLIC KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAA=
+-----END NEBULA P256 PUBLIC KEY-----
 `)
 	shortKey := []byte(`# A short key
 -----BEGIN NEBULA X25519 PUBLIC KEY-----
@@ -697,29 +961,37 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 -END NEBULA X25519 PUBLIC KEY-----`)
 
-	keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
+	keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)
 
 	// Success test case
-	k, rest, err := UnmarshalX25519PublicKey(keyBundle)
+	k, rest, curve, err := UnmarshalPublicKey(keyBundle)
 	assert.Equal(t, len(k), 32)
 	assert.Nil(t, err)
+	assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_CURVE25519, curve)
+
+	// Success test case
+	k, rest, curve, err = UnmarshalPublicKey(rest)
+	assert.Equal(t, len(k), 65)
+	assert.Nil(t, err)
 	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_P256, curve)
 
 	// Fail due to short key
-	k, rest, err = UnmarshalX25519PublicKey(rest)
+	k, rest, curve, err = UnmarshalPublicKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
-	assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 public key")
+	assert.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")
 
 	// Fail due to invalid banner
-	k, rest, err = UnmarshalX25519PublicKey(rest)
+	k, rest, curve, err = UnmarshalPublicKey(rest)
 	assert.Nil(t, k)
-	assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 public key banner")
+	assert.EqualError(t, err, "bytes did not contain a proper nebula public key banner")
 	assert.Equal(t, rest, invalidPem)
 
 	// Fail due to ivalid PEM format, because
 	// it's missing the requisite pre-encapsulation boundary.
-	k, rest, err = UnmarshalX25519PublicKey(rest)
+	k, rest, curve, err = UnmarshalPublicKey(rest)
 	assert.Nil(t, k)
 	assert.Equal(t, rest, invalidPem)
 	assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
@@ -816,13 +1088,56 @@ func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []
|
|||||||
nc.Details.Groups = groups
|
nc.Details.Groups = groups
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(priv)
|
err = nc.Sign(Curve_CURVE25519, priv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
return nc, pub, priv, nil
|
return nc, pub, priv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newTestCaCertP256(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
||||||
|
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||||
|
pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
|
||||||
|
rawPriv := priv.D.FillBytes(make([]byte, 32))
|
||||||
|
|
||||||
|
if before.IsZero() {
|
||||||
|
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
}
|
||||||
|
if after.IsZero() {
|
||||||
|
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
nc := &NebulaCertificate{
|
||||||
|
Details: NebulaCertificateDetails{
|
||||||
|
Name: "test ca",
|
||||||
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
|
PublicKey: pub,
|
||||||
|
IsCA: true,
|
||||||
|
Curve: Curve_P256,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ips) > 0 {
|
||||||
|
nc.Details.Ips = ips
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(subnets) > 0 {
|
||||||
|
nc.Details.Subnets = subnets
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(groups) > 0 {
|
||||||
|
nc.Details.Groups = groups
|
||||||
|
}
|
||||||
|
|
||||||
|
err = nc.Sign(Curve_P256, rawPriv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
|
return nc, pub, rawPriv, nil
|
||||||
|
}
|
||||||
|
|
||||||
func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
||||||
issuer, err := ca.Sha256Sum()
|
issuer, err := ca.Sha256Sum()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -856,7 +1171,16 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, rawPriv := x25519Keypair()
|
var pub, rawPriv []byte
|
||||||
|
|
||||||
|
switch ca.Details.Curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
pub, rawPriv = x25519Keypair()
|
||||||
|
case Curve_P256:
|
||||||
|
pub, rawPriv = p256Keypair()
|
||||||
|
default:
|
||||||
|
return nil, nil, nil, fmt.Errorf("unknown curve: %v", ca.Details.Curve)
|
||||||
|
}
|
||||||
|
|
||||||
nc := &NebulaCertificate{
|
nc := &NebulaCertificate{
|
||||||
Details: NebulaCertificateDetails{
|
Details: NebulaCertificateDetails{
|
||||||
@@ -868,12 +1192,13 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
NotAfter: time.Unix(after.Unix(), 0),
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
PublicKey: pub,
|
PublicKey: pub,
|
||||||
IsCA: false,
|
IsCA: false,
|
||||||
|
Curve: ca.Details.Curve,
|
||||||
Issuer: issuer,
|
Issuer: issuer,
|
||||||
InvertedGroups: make(map[string]struct{}),
|
InvertedGroups: make(map[string]struct{}),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(key)
|
err = nc.Sign(ca.Details.Curve, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -894,3 +1219,12 @@ func x25519Keypair() ([]byte, []byte) {
|
|||||||
|
|
||||||
return pubkey, privkey
|
return pubkey, privkey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func p256Keypair() ([]byte, []byte) {
|
||||||
|
privkey, err := ecdh.P256().GenerateKey(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
pubkey := privkey.PublicKey()
|
||||||
|
return pubkey.Bytes(), privkey.Bytes()
|
||||||
|
}
|
||||||
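The tests above exercise the curve-aware replacements for the X25519-only unmarshal helpers. A minimal caller-side sketch of the new API follows; the signature of cert.UnmarshalPublicKey is inferred from the test calls above, and the input file name is made up, so treat this as an illustration rather than documented usage.

package main

import (
    "fmt"
    "os"

    "github.com/slackhq/nebula/cert"
)

func main() {
    // "host.pub" is a hypothetical PEM file produced by nebula-cert keygen.
    raw, err := os.ReadFile("host.pub")
    if err != nil {
        panic(err)
    }

    pub, _, curve, err := cert.UnmarshalPublicKey(raw)
    if err != nil {
        panic(err)
    }

    // The curve now travels with the key, so callers can branch on it instead
    // of assuming X25519 as the old UnmarshalX25519PublicKey API did.
    switch curve {
    case cert.Curve_CURVE25519:
        fmt.Printf("X25519 public key, %d bytes\n", len(pub))
    case cert.Curve_P256:
        fmt.Printf("P-256 public key, %d bytes\n", len(pub))
    }
}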
cert/crypto.go (new file, 143 lines)
@@ -0,0 +1,143 @@
package cert

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "fmt"
    "io"

    "golang.org/x/crypto/argon2"
)

// KDF factors
type Argon2Parameters struct {
    version     rune
    Memory      uint32 // KiB
    Parallelism uint8
    Iterations  uint32
    salt        []byte
}

// Returns a new Argon2Parameters object with current version set
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
    return &Argon2Parameters{
        version:     argon2.Version,
        Memory:      memory, // KiB
        Parallelism: parallelism,
        Iterations:  iterations,
    }
}

// Encrypts data using AES-256-GCM and the Argon2id key derivation function
func aes256Encrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
    key, err := aes256DeriveKey(passphrase, kdfParams)
    if err != nil {
        return nil, err
    }

    // this should never happen, but since this dictates how our calls into the
    // aes package behave and could be catastraphic, let's sanity check this
    if len(key) != 32 {
        return nil, fmt.Errorf("invalid AES-256 key length (%d) - cowardly refusing to encrypt", len(key))
    }

    block, err := aes.NewCipher(key)
    if err != nil {
        return nil, err
    }

    gcm, err := cipher.NewGCM(block)
    if err != nil {
        return nil, err
    }

    nonce := make([]byte, gcm.NonceSize())
    if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
        return nil, err
    }

    ciphertext := gcm.Seal(nil, nonce, data, nil)
    blob := joinNonceCiphertext(nonce, ciphertext)

    return blob, nil
}

// Decrypts data using AES-256-GCM and the Argon2id key derivation function
// Expects the data to include an Argon2id parameter string before the encrypted data
func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
    key, err := aes256DeriveKey(passphrase, kdfParams)
    if err != nil {
        return nil, err
    }

    block, err := aes.NewCipher(key)
    if err != nil {
        return nil, err
    }

    gcm, err := cipher.NewGCM(block)
    if err != nil {
        return nil, err
    }

    nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
    if err != nil {
        return nil, err
    }

    plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
    if err != nil {
        return nil, fmt.Errorf("invalid passphrase or corrupt private key")
    }

    return plaintext, nil
}

func aes256DeriveKey(passphrase []byte, params *Argon2Parameters) ([]byte, error) {
    if params.salt == nil {
        params.salt = make([]byte, 32)
        if _, err := rand.Read(params.salt); err != nil {
            return nil, err
        }
    }

    // keySize of 32 bytes will result in AES-256 encryption
    key, err := deriveKey(passphrase, 32, params)
    if err != nil {
        return nil, err
    }

    return key, nil
}

// Derives a key from a passphrase using Argon2id
func deriveKey(passphrase []byte, keySize uint32, params *Argon2Parameters) ([]byte, error) {
    if params.version != argon2.Version {
        return nil, fmt.Errorf("incompatible Argon2 version: %d", params.version)
    }

    if params.salt == nil {
        return nil, fmt.Errorf("salt must be set in argon2Parameters")
    } else if len(params.salt) < 16 {
        return nil, fmt.Errorf("salt must be at least 128 bits")
    }

    key := argon2.IDKey(passphrase, params.salt, params.Iterations, params.Memory, params.Parallelism, keySize)

    return key, nil
}

// Prepends nonce to ciphertext
func joinNonceCiphertext(nonce []byte, ciphertext []byte) []byte {
    return append(nonce, ciphertext...)
}

// Splits nonce from ciphertext
func splitNonceCiphertext(blob []byte, nonceSize int) ([]byte, []byte, error) {
    if len(blob) <= nonceSize {
        return nil, nil, fmt.Errorf("invalid ciphertext blob - blob shorter than nonce length")
    }

    return blob[:nonceSize], blob[nonceSize:], nil
}
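cert/crypto.go above wires Argon2id key derivation to AES-256-GCM. The sketch below shows the intended round trip of those unexported helpers; it assumes it would live inside package cert (for example as an extra example test), and the passphrase and parameter values are arbitrary.

package cert

import (
    "bytes"
    "fmt"
)

func Example_encryptRoundTrip() {
    passphrase := []byte("correct horse battery staple")
    kdf := NewArgon2Parameters(64*1024, 4, 3) // 64 MiB, 4 lanes, 3 passes

    ciphertext, err := aes256Encrypt(passphrase, kdf, []byte("secret key material"))
    if err != nil {
        panic(err)
    }

    // aes256Encrypt filled in a random salt on first use, so the same
    // Argon2Parameters value must be reused to derive the decryption key.
    plaintext, err := aes256Decrypt(passphrase, kdf, ciphertext)
    if err != nil {
        panic(err)
    }

    fmt.Println(bytes.Equal(plaintext, []byte("secret key material")))
    // Output: true
}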
cert/crypto_test.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package cert

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "golang.org/x/crypto/argon2"
)

func TestNewArgon2Parameters(t *testing.T) {
    p := NewArgon2Parameters(64*1024, 4, 3)
    assert.EqualValues(t, &Argon2Parameters{
        version:     argon2.Version,
        Memory:      64 * 1024,
        Parallelism: 4,
        Iterations:  3,
    }, p)
    p = NewArgon2Parameters(2*1024*1024, 2, 1)
    assert.EqualValues(t, &Argon2Parameters{
        version:     argon2.Version,
        Memory:      2 * 1024 * 1024,
        Parallelism: 2,
        Iterations:  1,
    }, p)
}
@@ -1,9 +1,14 @@
 package cert

-import "errors"
+import (
+	"errors"
+)

 var (
-	ErrExpired       = errors.New("certificate is expired")
-	ErrNotCA         = errors.New("certificate is not a CA")
-	ErrNotSelfSigned = errors.New("certificate is not self-signed")
+	ErrRootExpired       = errors.New("root certificate is expired")
+	ErrExpired           = errors.New("certificate is expired")
+	ErrNotCA             = errors.New("certificate is not a CA")
+	ErrNotSelfSigned     = errors.New("certificate is not self-signed")
+	ErrBlockListed       = errors.New("certificate is in the block list")
+	ErrSignatureMismatch = errors.New("certificate signature did not match")
 )
@@ -6,28 +6,36 @@ import (
 	"github.com/slackhq/nebula/iputil"
 )

-type Node struct {
-	left   *Node
-	right  *Node
-	parent *Node
-	value  interface{}
+type Node[T any] struct {
+	left     *Node[T]
+	right    *Node[T]
+	parent   *Node[T]
+	hasValue bool
+	value    T
 }

-type Tree4 struct {
-	root *Node
+type entry[T any] struct {
+	CIDR  *net.IPNet
+	Value T
+}
+
+type Tree4[T any] struct {
+	root *Node[T]
+	list []entry[T]
 }

 const (
 	startbit = iputil.VpnIp(0x80000000)
 )

-func NewTree4() *Tree4 {
-	tree := new(Tree4)
-	tree.root = &Node{}
+func NewTree4[T any]() *Tree4[T] {
+	tree := new(Tree4[T])
+	tree.root = &Node[T]{}
+	tree.list = []entry[T]{}
 	return tree
 }

-func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
+func (tree *Tree4[T]) AddCIDR(cidr *net.IPNet, val T) {
 	bit := startbit
 	node := tree.root
 	next := tree.root
@@ -53,13 +61,23 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {

 	// We already have this range so update the value
 	if next != nil {
+		addCIDR := cidr.String()
+		for i, v := range tree.list {
+			if addCIDR == v.CIDR.String() {
+				tree.list = append(tree.list[:i], tree.list[i+1:]...)
+				break
+			}
+		}
+
+		tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
 		node.value = val
+		node.hasValue = true
 		return
 	}

 	// Build up the rest of the tree we don't already have
 	for bit&mask != 0 {
-		next = &Node{}
+		next = &Node[T]{}
 		next.parent = node

 		if ip&bit != 0 {
@@ -74,16 +92,18 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {

 	// Final node marks our cidr, set the value
 	node.value = val
+	node.hasValue = true
+	tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
 }

-// Finds the first match, which may be the least specific
-func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
+// Contains finds the first match, which may be the least specific
+func (tree *Tree4[T]) Contains(ip iputil.VpnIp) (ok bool, value T) {
 	bit := startbit
 	node := tree.root

 	for node != nil {
-		if node.value != nil {
-			return node.value
+		if node.hasValue {
+			return true, node.value
 		}

 		if ip&bit != 0 {
@@ -96,17 +116,18 @@ func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {

 	}

-	return value
+	return false, value
 }

-// Finds the most specific match
-func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
+// MostSpecificContains finds the most specific match
+func (tree *Tree4[T]) MostSpecificContains(ip iputil.VpnIp) (ok bool, value T) {
 	bit := startbit
 	node := tree.root

 	for node != nil {
-		if node.value != nil {
+		if node.hasValue {
 			value = node.value
+			ok = true
 		}

 		if ip&bit != 0 {
@@ -118,11 +139,12 @@ func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
 		bit >>= 1
 	}

-	return value
+	return ok, value
 }

-// Finds the most specific match
-func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
+// Match finds the most specific match
+// TODO this is exact match
+func (tree *Tree4[T]) Match(ip iputil.VpnIp) (ok bool, value T) {
 	bit := startbit
 	node := tree.root
 	lastNode := node
@@ -140,6 +162,12 @@ func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {

 	if bit == 0 && lastNode != nil {
 		value = lastNode.value
+		ok = true
 	}
-	return value
+	return ok, value
+}
+
+// List will return all CIDRs and their current values. Do not modify the contents!
+func (tree *Tree4[T]) List() []entry[T] {
+	return tree.list
 }
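With the tree made generic, lookups return an explicit ok flag instead of a nil interface{} sentinel. A small usage sketch follows; the cidr import path is assumed from the repository layout and is not stated in this diff.

package main

import (
    "fmt"
    "net"

    "github.com/slackhq/nebula/cidr"
    "github.com/slackhq/nebula/iputil"
)

func main() {
    tree := cidr.NewTree4[string]()

    _, network, _ := net.ParseCIDR("10.0.0.0/8")
    tree.AddCIDR(network, "lan")

    // A stored zero value ("" here) is now distinguishable from "not found"
    // because the boolean result carries the hit/miss information.
    ok, v := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("10.1.2.3")))
    fmt.Println(ok, v) // true lan

    ok, v = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("192.168.0.1")))
    fmt.Println(ok, v) // false (empty string)
}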
@@ -8,8 +8,22 @@ import (
 	"github.com/stretchr/testify/assert"
 )

+func TestCIDRTree_List(t *testing.T) {
+	tree := NewTree4[string]()
+	tree.AddCIDR(Parse("1.0.0.0/16"), "1")
+	tree.AddCIDR(Parse("1.0.0.0/8"), "2")
+	tree.AddCIDR(Parse("1.0.0.0/16"), "3")
+	tree.AddCIDR(Parse("1.0.0.0/16"), "4")
+	list := tree.List()
+	assert.Len(t, list, 2)
+	assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
+	assert.Equal(t, "2", list[0].Value)
+	assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
+	assert.Equal(t, "4", list[1].Value)
+}
+
 func TestCIDRTree_Contains(t *testing.T) {
-	tree := NewTree4()
+	tree := NewTree4[string]()
 	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
 	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
 	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
@@ -19,35 +33,43 @@ func TestCIDRTree_Contains(t *testing.T) {
 	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

 	tests := []struct {
+		Found  bool
 		Result interface{}
 		IP     string
 	}{
-		{"1", "1.0.0.0"},
-		{"1", "1.255.255.255"},
-		{"2", "2.1.0.0"},
-		{"2", "2.1.255.255"},
-		{"3", "3.1.1.0"},
-		{"3", "3.1.1.255"},
-		{"4a", "4.1.1.255"},
-		{"4a", "4.1.1.1"},
-		{"5", "240.0.0.0"},
-		{"5", "255.255.255.255"},
-		{nil, "239.0.0.0"},
-		{nil, "4.1.2.2"},
+		{true, "1", "1.0.0.0"},
+		{true, "1", "1.255.255.255"},
+		{true, "2", "2.1.0.0"},
+		{true, "2", "2.1.255.255"},
+		{true, "3", "3.1.1.0"},
+		{true, "3", "3.1.1.255"},
+		{true, "4a", "4.1.1.255"},
+		{true, "4a", "4.1.1.1"},
+		{true, "5", "240.0.0.0"},
+		{true, "5", "255.255.255.255"},
+		{false, "", "239.0.0.0"},
+		{false, "", "4.1.2.2"},
 	}

 	for _, tt := range tests {
-		assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
+		ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
+		assert.Equal(t, tt.Found, ok)
+		assert.Equal(t, tt.Result, r)
 	}

-	tree = NewTree4()
+	tree = NewTree4[string]()
 	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
-	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
-	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
+	ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
+
+	ok, r = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
 }

 func TestCIDRTree_MostSpecificContains(t *testing.T) {
-	tree := NewTree4()
+	tree := NewTree4[string]()
 	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
 	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
 	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
@@ -57,59 +79,75 @@ func TestCIDRTree_MostSpecificContains(t *testing.T) {
 	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

 	tests := []struct {
+		Found  bool
 		Result interface{}
 		IP     string
 	}{
-		{"1", "1.0.0.0"},
-		{"1", "1.255.255.255"},
-		{"2", "2.1.0.0"},
-		{"2", "2.1.255.255"},
-		{"3", "3.1.1.0"},
-		{"3", "3.1.1.255"},
-		{"4a", "4.1.1.255"},
-		{"4b", "4.1.1.2"},
-		{"4c", "4.1.1.1"},
-		{"5", "240.0.0.0"},
-		{"5", "255.255.255.255"},
-		{nil, "239.0.0.0"},
-		{nil, "4.1.2.2"},
+		{true, "1", "1.0.0.0"},
+		{true, "1", "1.255.255.255"},
+		{true, "2", "2.1.0.0"},
+		{true, "2", "2.1.255.255"},
+		{true, "3", "3.1.1.0"},
+		{true, "3", "3.1.1.255"},
+		{true, "4a", "4.1.1.255"},
+		{true, "4b", "4.1.1.2"},
+		{true, "4c", "4.1.1.1"},
+		{true, "5", "240.0.0.0"},
+		{true, "5", "255.255.255.255"},
+		{false, "", "239.0.0.0"},
+		{false, "", "4.1.2.2"},
 	}

 	for _, tt := range tests {
-		assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
+		ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
+		assert.Equal(t, tt.Found, ok)
+		assert.Equal(t, tt.Result, r)
 	}

-	tree = NewTree4()
+	tree = NewTree4[string]()
 	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
-	assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
-	assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
+	ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
+
+	ok, r = tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
 }

 func TestCIDRTree_Match(t *testing.T) {
-	tree := NewTree4()
+	tree := NewTree4[string]()
 	tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
 	tree.AddCIDR(Parse("4.1.1.1/32"), "1b")

 	tests := []struct {
+		Found  bool
 		Result interface{}
 		IP     string
 	}{
-		{"1a", "4.1.1.0"},
-		{"1b", "4.1.1.1"},
+		{true, "1a", "4.1.1.0"},
+		{true, "1b", "4.1.1.1"},
 	}

 	for _, tt := range tests {
-		assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
+		ok, r := tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
+		assert.Equal(t, tt.Found, ok)
+		assert.Equal(t, tt.Result, r)
 	}

-	tree = NewTree4()
+	tree = NewTree4[string]()
 	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
-	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
-	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
+	ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
+
+	ok, r = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
 }

 func BenchmarkCIDRTree_Contains(b *testing.B) {
-	tree := NewTree4()
+	tree := NewTree4[string]()
 	tree.AddCIDR(Parse("1.1.0.0/16"), "1")
 	tree.AddCIDR(Parse("1.2.1.1/32"), "1")
 	tree.AddCIDR(Parse("192.2.1.1/32"), "1")
@@ -131,7 +169,7 @@ func BenchmarkCIDRTree_Contains(b *testing.B) {
 }

 func BenchmarkCIDRTree_Match(b *testing.B) {
-	tree := NewTree4()
+	tree := NewTree4[string]()
 	tree.AddCIDR(Parse("1.1.0.0/16"), "1")
 	tree.AddCIDR(Parse("1.2.1.1/32"), "1")
 	tree.AddCIDR(Parse("192.2.1.1/32"), "1")
@@ -8,20 +8,20 @@ import (

 const startbit6 = uint64(1 << 63)

-type Tree6 struct {
-	root4 *Node
-	root6 *Node
+type Tree6[T any] struct {
+	root4 *Node[T]
+	root6 *Node[T]
 }

-func NewTree6() *Tree6 {
-	tree := new(Tree6)
-	tree.root4 = &Node{}
-	tree.root6 = &Node{}
+func NewTree6[T any]() *Tree6[T] {
+	tree := new(Tree6[T])
+	tree.root4 = &Node[T]{}
+	tree.root6 = &Node[T]{}
 	return tree
 }

-func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
-	var node, next *Node
+func (tree *Tree6[T]) AddCIDR(cidr *net.IPNet, val T) {
+	var node, next *Node[T]

 	cidrIP, ipv4 := isIPV4(cidr.IP)
 	if ipv4 {
@@ -56,7 +56,7 @@ func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {

 	// Build up the rest of the tree we don't already have
 	for bit&mask != 0 {
-		next = &Node{}
+		next = &Node[T]{}
 		next.parent = node

 		if ip&bit != 0 {
@@ -72,11 +72,12 @@ func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {

 	// Final node marks our cidr, set the value
 	node.value = val
+	node.hasValue = true
 }

 // Finds the most specific match
-func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
-	var node *Node
+func (tree *Tree6[T]) MostSpecificContains(ip net.IP) (ok bool, value T) {
+	var node *Node[T]

 	wholeIP, ipv4 := isIPV4(ip)
 	if ipv4 {
@@ -90,8 +91,9 @@ func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
 	bit := startbit

 	for node != nil {
-		if node.value != nil {
+		if node.hasValue {
 			value = node.value
+			ok = true
 		}

 		if bit == 0 {
@@ -108,16 +110,17 @@ func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
 		}
 	}

-	return value
+	return ok, value
 }

-func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
+func (tree *Tree6[T]) MostSpecificContainsIpV4(ip iputil.VpnIp) (ok bool, value T) {
 	bit := startbit
 	node := tree.root4

 	for node != nil {
-		if node.value != nil {
+		if node.hasValue {
 			value = node.value
+			ok = true
 		}

 		if ip&bit != 0 {
@@ -129,10 +132,10 @@ func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{})
 		bit >>= 1
 	}

-	return value
+	return ok, value
 }

-func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
+func (tree *Tree6[T]) MostSpecificContainsIpV6(hi, lo uint64) (ok bool, value T) {
 	ip := hi
 	node := tree.root6

@@ -140,8 +143,9 @@ func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
 	bit := startbit6

 	for node != nil {
-		if node.value != nil {
+		if node.hasValue {
 			value = node.value
+			ok = true
 		}

 		if bit == 0 {
@@ -160,7 +164,7 @@ func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
 		ip = lo
 	}

-	return value
+	return ok, value
 }

 func isIPV4(ip net.IP) (net.IP, bool) {
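The IPv6 tree gets the same treatment. A companion sketch for the hi/lo lookup path follows, under the same assumption about the cidr import path; the hi/lo split mirrors how the test below drives MostSpecificContainsIpV6.

package main

import (
    "encoding/binary"
    "fmt"
    "net"

    "github.com/slackhq/nebula/cidr"
)

func main() {
    tree := cidr.NewTree6[string]()
    _, network, _ := net.ParseCIDR("fd00::/8")
    tree.AddCIDR(network, "mesh")

    // hi and lo are the two 64-bit halves of the 16-byte address.
    ip := net.ParseIP("fd00::1").To16()
    hi := binary.BigEndian.Uint64(ip[:8])
    lo := binary.BigEndian.Uint64(ip[8:])

    ok, v := tree.MostSpecificContainsIpV6(hi, lo)
    fmt.Println(ok, v) // true mesh
}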
@@ -9,7 +9,7 @@ import (
 )

 func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
-	tree := NewTree6()
+	tree := NewTree6[string]()
 	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
 	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
 	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
@@ -22,53 +22,68 @@ func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
 	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

 	tests := []struct {
+		Found  bool
 		Result interface{}
 		IP     string
 	}{
-		{"1", "1.0.0.0"},
-		{"1", "1.255.255.255"},
-		{"2", "2.1.0.0"},
-		{"2", "2.1.255.255"},
-		{"3", "3.1.1.0"},
-		{"3", "3.1.1.255"},
-		{"4a", "4.1.1.255"},
-		{"4b", "4.1.1.2"},
-		{"4c", "4.1.1.1"},
-		{"5", "240.0.0.0"},
-		{"5", "255.255.255.255"},
-		{"6a", "1:2:0:4:1:1:1:1"},
-		{"6b", "1:2:0:4:5:1:1:1"},
-		{"6c", "1:2:0:4:5:0:0:0"},
-		{nil, "239.0.0.0"},
-		{nil, "4.1.2.2"},
+		{true, "1", "1.0.0.0"},
+		{true, "1", "1.255.255.255"},
+		{true, "2", "2.1.0.0"},
+		{true, "2", "2.1.255.255"},
+		{true, "3", "3.1.1.0"},
+		{true, "3", "3.1.1.255"},
+		{true, "4a", "4.1.1.255"},
+		{true, "4b", "4.1.1.2"},
+		{true, "4c", "4.1.1.1"},
+		{true, "5", "240.0.0.0"},
+		{true, "5", "255.255.255.255"},
+		{true, "6a", "1:2:0:4:1:1:1:1"},
+		{true, "6b", "1:2:0:4:5:1:1:1"},
+		{true, "6c", "1:2:0:4:5:0:0:0"},
+		{false, "", "239.0.0.0"},
+		{false, "", "4.1.2.2"},
 	}

 	for _, tt := range tests {
-		assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
+		ok, r := tree.MostSpecificContains(net.ParseIP(tt.IP))
+		assert.Equal(t, tt.Found, ok)
+		assert.Equal(t, tt.Result, r)
 	}

-	tree = NewTree6()
+	tree = NewTree6[string]()
 	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
 	tree.AddCIDR(Parse("::/0"), "cool6")
-	assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
-	assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
-	assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
-	assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
+	ok, r := tree.MostSpecificContains(net.ParseIP("0.0.0.0"))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
+
+	ok, r = tree.MostSpecificContains(net.ParseIP("255.255.255.255"))
+	assert.True(t, ok)
+	assert.Equal(t, "cool", r)
+
+	ok, r = tree.MostSpecificContains(net.ParseIP("::"))
+	assert.True(t, ok)
+	assert.Equal(t, "cool6", r)
+
+	ok, r = tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8"))
+	assert.True(t, ok)
+	assert.Equal(t, "cool6", r)
 }

 func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
-	tree := NewTree6()
+	tree := NewTree6[string]()
 	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
 	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
 	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

 	tests := []struct {
+		Found  bool
 		Result interface{}
 		IP     string
 	}{
-		{"6a", "1:2:0:4:1:1:1:1"},
-		{"6b", "1:2:0:4:5:1:1:1"},
-		{"6c", "1:2:0:4:5:0:0:0"},
+		{true, "6a", "1:2:0:4:1:1:1:1"},
+		{true, "6b", "1:2:0:4:5:1:1:1"},
+		{true, "6c", "1:2:0:4:5:0:0:0"},
 	}

 	for _, tt := range tests {
@@ -76,6 +91,8 @@ func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
 		hi := binary.BigEndian.Uint64(ip[:8])
 		lo := binary.BigEndian.Uint64(ip[8:])

-		assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
+		ok, r := tree.MostSpecificContainsIpV6(hi, lo)
+		assert.Equal(t, tt.Found, ok)
+		assert.Equal(t, tt.Result, r)
 	}
 }
@@ -1,11 +1,13 @@
 package main

 import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
 	"crypto/rand"
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"math"
 	"net"
 	"os"
 	"strings"
@@ -17,15 +19,21 @@ import (
 )

 type caFlags struct {
 	set              *flag.FlagSet
 	name             *string
 	duration         *time.Duration
 	outKeyPath       *string
 	outCertPath      *string
 	outQRPath        *string
 	groups           *string
 	ips              *string
 	subnets          *string
+	argonMemory      *uint
+	argonIterations  *uint
+	argonParallelism *uint
+	encryption       *bool
+
+	curve *string
 }

 func newCaFlags() *caFlags {
@@ -39,10 +47,29 @@ func newCaFlags() *caFlags {
 	cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
 	cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses")
 	cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets")
+	cf.argonMemory = cf.set.Uint("argon-memory", 2*1024*1024, "Optional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase")
+	cf.argonParallelism = cf.set.Uint("argon-parallelism", 4, "Optional: Argon2 parallelism parameter used for encrypted private key passphrase")
+	cf.argonIterations = cf.set.Uint("argon-iterations", 1, "Optional: Argon2 iterations parameter used for encrypted private key passphrase")
+	cf.encryption = cf.set.Bool("encrypt", false, "Optional: prompt for passphrase and write out-key in an encrypted format")
+	cf.curve = cf.set.String("curve", "25519", "EdDSA/ECDSA Curve (25519, P256)")
 	return &cf
 }

-func ca(args []string, out io.Writer, errOut io.Writer) error {
+func parseArgonParameters(memory uint, parallelism uint, iterations uint) (*cert.Argon2Parameters, error) {
+	if memory <= 0 || memory > math.MaxUint32 {
+		return nil, newHelpErrorf("-argon-memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
+	}
+	if parallelism <= 0 || parallelism > math.MaxUint8 {
+		return nil, newHelpErrorf("-argon-parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
+	}
+	if iterations <= 0 || iterations > math.MaxUint32 {
+		return nil, newHelpErrorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
+	}
+
+	return cert.NewArgon2Parameters(uint32(memory), uint8(parallelism), uint32(iterations)), nil
+}
+
+func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
 	cf := newCaFlags()
 	err := cf.set.Parse(args)
 	if err != nil {
@@ -58,6 +85,12 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
 	if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
 		return err
 	}
+	var kdfParams *cert.Argon2Parameters
+	if *cf.encryption {
+		if kdfParams, err = parseArgonParameters(*cf.argonMemory, *cf.argonParallelism, *cf.argonIterations); err != nil {
+			return err
+		}
+	}

 	if *cf.duration <= 0 {
 		return &helpError{"-duration must be greater than 0"}
@@ -109,9 +142,47 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
 		}
 	}

-	pub, rawPriv, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		return fmt.Errorf("error while generating ed25519 keys: %s", err)
+	var passphrase []byte
+	if *cf.encryption {
+		for i := 0; i < 5; i++ {
+			out.Write([]byte("Enter passphrase: "))
+			passphrase, err = pr.ReadPassword()
+
+			if err == ErrNoTerminal {
+				return fmt.Errorf("out-key must be encrypted interactively")
+			} else if err != nil {
+				return fmt.Errorf("error reading passphrase: %s", err)
+			}
+
+			if len(passphrase) > 0 {
+				break
+			}
+		}
+
+		if len(passphrase) == 0 {
+			return fmt.Errorf("no passphrase specified, remove -encrypt flag to write out-key in plaintext")
+		}
+	}
+
+	var curve cert.Curve
+	var pub, rawPriv []byte
+	switch *cf.curve {
+	case "25519", "X25519", "Curve25519", "CURVE25519":
+		curve = cert.Curve_CURVE25519
+		pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
+		if err != nil {
+			return fmt.Errorf("error while generating ed25519 keys: %s", err)
+		}
+	case "P256":
+		var key *ecdsa.PrivateKey
+		curve = cert.Curve_P256
+		key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+		if err != nil {
+			return fmt.Errorf("error while generating ecdsa keys: %s", err)
+		}
+		// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
+		rawPriv = key.D.FillBytes(make([]byte, 32))
+		pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)
 	}

 	nc := cert.NebulaCertificate{
@@ -124,6 +195,7 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
 			NotAfter:  time.Now().Add(*cf.duration),
 			PublicKey: pub,
 			IsCA:      true,
+			Curve:     curve,
 		},
 	}

@@ -135,22 +207,32 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath)
 	}

-	err = nc.Sign(rawPriv)
+	err = nc.Sign(curve, rawPriv)
 	if err != nil {
 		return fmt.Errorf("error while signing: %s", err)
 	}

-	err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalEd25519PrivateKey(rawPriv), 0600)
+	var b []byte
+	if *cf.encryption {
+		b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
+		if err != nil {
+			return fmt.Errorf("error while encrypting out-key: %s", err)
+		}
+	} else {
+		b = cert.MarshalSigningPrivateKey(curve, rawPriv)
+	}
+
+	err = os.WriteFile(*cf.outKeyPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-key: %s", err)
 	}

-	b, err := nc.MarshalToPEM()
+	b, err = nc.MarshalToPEM()
 	if err != nil {
 		return fmt.Errorf("error while marshalling certificate: %s", err)
 	}

-	err = ioutil.WriteFile(*cf.outCertPath, b, 0600)
+	err = os.WriteFile(*cf.outCertPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-crt: %s", err)
 	}
@@ -161,7 +243,7 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("error while generating qr code: %s", err)
 	}

-	err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
+	err = os.WriteFile(*cf.outQRPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-qr: %s", err)
 	}
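The -encrypt path above prompts for a passphrase and writes the CA key through cert.EncryptAndMarshalSigningPrivateKey. The sketch below pairs it with the cert.DecryptAndUnmarshalSigningPrivateKey call used in the tests; the call signatures are taken from the diff, while the key generation, passphrase, and parameter values are purely illustrative.

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "fmt"

    "github.com/slackhq/nebula/cert"
)

func main() {
    // Stand-in signing key; nebula-cert ca generates this itself.
    _, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }

    passphrase := []byte("example passphrase")
    kdf := cert.NewArgon2Parameters(2*1024*1024, 4, 1) // the ca command defaults

    blob, err := cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, priv, passphrase, kdf)
    if err != nil {
        panic(err)
    }

    // Reversing the operation recovers the curve, the 64-byte key, and any
    // trailing PEM data (empty here).
    curve, key, rest, err := cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, blob)
    if err != nil {
        panic(err)
    }
    fmt.Println(curve, len(key), len(rest))
}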
@@ -5,8 +5,10 @@ package main

 import (
 	"bytes"
-	"io/ioutil"
+	"encoding/pem"
+	"errors"
 	"os"
+	"strings"
 	"testing"
 	"time"

@@ -26,8 +28,18 @@ func Test_caHelp(t *testing.T) {
 	assert.Equal(
 		t,
 		"Usage of "+os.Args[0]+" ca <flags>: create a self signed certificate authority\n"+
+			"  -argon-iterations uint\n"+
+			"    \tOptional: Argon2 iterations parameter used for encrypted private key passphrase (default 1)\n"+
+			"  -argon-memory uint\n"+
+			"    \tOptional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase (default 2097152)\n"+
+			"  -argon-parallelism uint\n"+
+			"    \tOptional: Argon2 parallelism parameter used for encrypted private key passphrase (default 4)\n"+
+			"  -curve string\n"+
+			"    \tEdDSA/ECDSA Curve (25519, P256) (default \"25519\")\n"+
 			"  -duration duration\n"+
 			"    \tOptional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\" (default 8760h0m0s)\n"+
+			"  -encrypt\n"+
+			"    \tOptional: prompt for passphrase and write out-key in an encrypted format\n"+
 			"  -groups string\n"+
 			"    \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
 			"  -ips string\n"+
@@ -50,18 +62,38 @@ func Test_ca(t *testing.T) {
 	ob := &bytes.Buffer{}
 	eb := &bytes.Buffer{}

+	nopw := &StubPasswordReader{
+		password: []byte(""),
+		err:      nil,
+	}
+
+	errpw := &StubPasswordReader{
+		password: []byte(""),
+		err:      errors.New("stub error"),
+	}
+
+	passphrase := []byte("DO NOT USE THIS KEY")
+	testpw := &StubPasswordReader{
+		password: passphrase,
+		err:      nil,
+	}
+
+	pwPromptOb := "Enter passphrase: "
+
 	// required args
-	assertHelpError(t, ca([]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb), "-name is required")
+	assertHelpError(t, ca(
+		[]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
+	), "-name is required")
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

 	// ipv4 only ips
-	assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100")
+	assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

 	// ipv4 only subnets
-	assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100")
+	assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

@@ -69,12 +101,12 @@ func Test_ca(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
-	assert.EqualError(t, ca(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
+	assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

 	// create temp key file
-	keyF, err := ioutil.TempFile("", "test.key")
+	keyF, err := os.CreateTemp("", "test.key")
 	assert.Nil(t, err)
 	os.Remove(keyF.Name())

@@ -82,12 +114,12 @@ func Test_ca(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
-	assert.EqualError(t, ca(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
+	assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

 	// create temp cert file
-	crtF, err := ioutil.TempFile("", "test.crt")
+	crtF, err := os.CreateTemp("", "test.crt")
 	assert.Nil(t, err)
 	os.Remove(crtF.Name())
 	os.Remove(keyF.Name())
@@ -96,18 +128,18 @@ func Test_ca(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
-	assert.Nil(t, ca(args, ob, eb))
+	assert.Nil(t, ca(args, ob, eb, nopw))
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

 	// read cert and key files
-	rb, _ := ioutil.ReadFile(keyF.Name())
+	rb, _ := os.ReadFile(keyF.Name())
 	lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
 	assert.Len(t, lKey, 64)

-	rb, _ = ioutil.ReadFile(crtF.Name())
+	rb, _ = os.ReadFile(crtF.Name())
 	lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
@@ -122,19 +154,67 @@ func Test_ca(t *testing.T) {
 	assert.Equal(t, "", lCrt.Details.Issuer)
 	assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey))

+	// test encrypted key
+	os.Remove(keyF.Name())
+	os.Remove(crtF.Name())
+	ob.Reset()
+	eb.Reset()
+	args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+	assert.Nil(t, ca(args, ob, eb, testpw))
+	assert.Equal(t, pwPromptOb, ob.String())
+	assert.Equal(t, "", eb.String())
+
+	// read encrypted key file and verify default params
+	rb, _ = os.ReadFile(keyF.Name())
+	k, _ := pem.Decode(rb)
+	ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
+	assert.Nil(t, err)
+	// we won't know salt in advance, so just check start of string
+	assert.Equal(t, uint32(2*1024*1024), ned.EncryptionMetadata.Argon2Parameters.Memory)
+	assert.Equal(t, uint8(4), ned.EncryptionMetadata.Argon2Parameters.Parallelism)
+	assert.Equal(t, uint32(1), ned.EncryptionMetadata.Argon2Parameters.Iterations)
+
+	// verify the key is valid and decrypt-able
+	var curve cert.Curve
+	curve, lKey, b, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rb)
+	assert.Equal(t, cert.Curve_CURVE25519, curve)
+	assert.Nil(t, err)
+	assert.Len(t, b, 0)
+	assert.Len(t, lKey, 64)
+
+	// test when reading passsword results in an error
+	os.Remove(keyF.Name())
+	os.Remove(crtF.Name())
+	ob.Reset()
+	eb.Reset()
+	args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+	assert.Error(t, ca(args, ob, eb, errpw))
+	assert.Equal(t, pwPromptOb, ob.String())
+	assert.Equal(t, "", eb.String())
+
+	// test when user fails to enter a password
+	os.Remove(keyF.Name())
+	os.Remove(crtF.Name())
+	ob.Reset()
+	eb.Reset()
+	args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+	assert.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
+	assert.Equal(t, strings.Repeat(pwPromptOb, 5), ob.String()) // prompts 5 times before giving up
+	assert.Equal(t, "", eb.String())
+
 	// create valid cert/key for overwrite tests
 	os.Remove(keyF.Name())
 	os.Remove(crtF.Name())
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
-	assert.Nil(t, ca(args, ob, eb))
+	assert.Nil(t, ca(args, ob, eb, nopw))

 	// test that we won't overwrite existing certificate file
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
-	assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA key: "+keyF.Name())
+	assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())

@@ -143,7 +223,7 @@ func Test_ca(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
-	assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA cert: "+crtF.Name())
+	assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
 	assert.Equal(t, "", ob.String())
 	assert.Equal(t, "", eb.String())
 	os.Remove(keyF.Name())
|||||||
@@ -4,7 +4,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"

 	"github.com/slackhq/nebula/cert"
@@ -14,6 +13,8 @@ type keygenFlags struct {
 	set        *flag.FlagSet
 	outKeyPath *string
 	outPubPath *string
+
+	curve *string
 }

 func newKeygenFlags() *keygenFlags {
@@ -21,6 +22,7 @@ func newKeygenFlags() *keygenFlags {
 	cf.set.Usage = func() {}
 	cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to")
 	cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to")
+	cf.curve = cf.set.String("curve", "25519", "ECDH Curve (25519, P256)")
 	return &cf
 }

@@ -38,14 +40,25 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
 		return err
 	}

-	pub, rawPriv := x25519Keypair()
+	var pub, rawPriv []byte
+	var curve cert.Curve
+	switch *cf.curve {
+	case "25519", "X25519", "Curve25519", "CURVE25519":
+		pub, rawPriv = x25519Keypair()
+		curve = cert.Curve_CURVE25519
+	case "P256":
+		pub, rawPriv = p256Keypair()
+		curve = cert.Curve_P256
+	default:
+		return fmt.Errorf("invalid curve: %s", *cf.curve)
+	}

-	err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600)
+	err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-key: %s", err)
 	}

-	err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalX25519PublicKey(pub), 0600)
+	err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-pub: %s", err)
 	}
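Note (not part of the change set): a rough sketch of how the updated keygen subcommand might be driven once the -curve flag above exists. The output file names are made up for illustration; the flag names and the keygen signature are the ones shown in the diff.

	// Hypothetical invocation of the keygen subcommand with the new flag.
	// Equivalent CLI form: nebula-cert keygen -curve P256 -out-key host.key -out-pub host.pub
	err := keygen(
		[]string{"-curve", "P256", "-out-key", "host.key", "-out-pub", "host.pub"},
		os.Stdout, os.Stderr,
	)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}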
@@ -2,7 +2,6 @@ package main

 import (
 	"bytes"
-	"io/ioutil"
 	"os"
 	"testing"

@@ -22,6 +21,8 @@ func Test_keygenHelp(t *testing.T) {
 	assert.Equal(
 		t,
 		"Usage of "+os.Args[0]+" keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`\n"+
+			" -curve string\n"+
+			" \tECDH Curve (25519, P256) (default \"25519\")\n"+
 			" -out-key string\n"+
 			" \tRequired: path to write the private key to\n"+
 			" -out-pub string\n"+
@@ -52,7 +53,7 @@ func Test_keygen(t *testing.T) {
 	assert.Equal(t, "", eb.String())

 	// create temp key file
-	keyF, err := ioutil.TempFile("", "test.key")
+	keyF, err := os.CreateTemp("", "test.key")
 	assert.Nil(t, err)
 	defer os.Remove(keyF.Name())

@@ -65,7 +66,7 @@ func Test_keygen(t *testing.T) {
 	assert.Equal(t, "", eb.String())

 	// create temp pub file
-	pubF, err := ioutil.TempFile("", "test.pub")
+	pubF, err := os.CreateTemp("", "test.pub")
 	assert.Nil(t, err)
 	defer os.Remove(pubF.Name())

@@ -78,13 +79,13 @@ func Test_keygen(t *testing.T) {
 	assert.Equal(t, "", eb.String())

 	// read cert and key files
-	rb, _ := ioutil.ReadFile(keyF.Name())
+	rb, _ := os.ReadFile(keyF.Name())
 	lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
 	assert.Len(t, lKey, 32)

-	rb, _ = ioutil.ReadFile(pubF.Name())
+	rb, _ = os.ReadFile(pubF.Name())
 	lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
@@ -62,11 +62,11 @@ func main() {

 	switch args[0] {
 	case "ca":
-		err = ca(args[1:], os.Stdout, os.Stderr)
+		err = ca(args[1:], os.Stdout, os.Stderr, StdinPasswordReader{})
 	case "keygen":
 		err = keygen(args[1:], os.Stdout, os.Stderr)
 	case "sign":
-		err = signCert(args[1:], os.Stdout, os.Stderr)
+		err = signCert(args[1:], os.Stdout, os.Stderr, StdinPasswordReader{})
 	case "print":
 		err = printCert(args[1:], os.Stdout, os.Stderr)
 	case "verify":
@@ -127,6 +127,8 @@ func help(err string, out io.Writer) {
 	fmt.Fprintln(out, " "+signSummary())
 	fmt.Fprintln(out, " "+printSummary())
 	fmt.Fprintln(out, " "+verifySummary())
+	fmt.Fprintln(out, "")
+	fmt.Fprintf(out, " To see usage for a given mode, use %s <mode> -h\n", os.Args[0])
 }

 func mustFlagString(name string, val *string) error {
@@ -22,7 +22,9 @@ func Test_help(t *testing.T) {
 		" " + keygenSummary() + "\n" +
 		" " + signSummary() + "\n" +
 		" " + printSummary() + "\n" +
-		" " + verifySummary() + "\n"
+		" " + verifySummary() + "\n" +
+		"\n" +
+		" To see usage for a given mode, use " + os.Args[0] + " <mode> -h\n"

 	ob := &bytes.Buffer{}

28  cmd/nebula-cert/passwords.go  Normal file
@@ -0,0 +1,28 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"golang.org/x/term"
+)
+
+var ErrNoTerminal = errors.New("cannot read password from nonexistent terminal")
+
+type PasswordReader interface {
+	ReadPassword() ([]byte, error)
+}
+
+type StdinPasswordReader struct{}
+
+func (pr StdinPasswordReader) ReadPassword() ([]byte, error) {
+	if !term.IsTerminal(int(os.Stdin.Fd())) {
+		return nil, ErrNoTerminal
+	}
+
+	password, err := term.ReadPassword(int(os.Stdin.Fd()))
+	fmt.Println()
+
+	return password, err
+}
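Aside (illustration only, not part of the new file): because StdinPasswordReader and the StubPasswordReader added in passwords_test.go both satisfy PasswordReader, the ca and sign code paths can be exercised with a scripted passphrase instead of a real terminal. The passphrase value below is invented for the example.

	// Production wiring: prompts on stdin (this is what main.go passes in).
	var pr PasswordReader = StdinPasswordReader{}

	// Test wiring: returns a canned value without touching the terminal.
	pr = &StubPasswordReader{password: []byte("example passphrase"), err: nil}

	passphrase, err := pr.ReadPassword()
	_, _ = passphrase, err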
10  cmd/nebula-cert/passwords_test.go  Normal file
@@ -0,0 +1,10 @@
+package main
+
+type StubPasswordReader struct {
+	password []byte
+	err      error
+}
+
+func (pr *StubPasswordReader) ReadPassword() ([]byte, error) {
+	return pr.password, pr.err
+}
@@ -5,7 +5,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"

@@ -41,7 +40,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
 		return err
 	}

-	rawCert, err := ioutil.ReadFile(*pf.path)
+	rawCert, err := os.ReadFile(*pf.path)
 	if err != nil {
 		return fmt.Errorf("unable to read cert; %s", err)
 	}
@@ -87,7 +86,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("error while generating qr code: %s", err)
 	}

-	err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
+	err = os.WriteFile(*pf.outQRPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-qr: %s", err)
 	}
@@ -2,7 +2,6 @@ package main

 import (
 	"bytes"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -54,7 +53,7 @@ func Test_printCert(t *testing.T) {
 	// invalid cert at path
 	ob.Reset()
 	eb.Reset()
-	tf, err := ioutil.TempFile("", "print-cert")
+	tf, err := os.CreateTemp("", "print-cert")
 	assert.Nil(t, err)
 	defer os.Remove(tf.Name())

@@ -87,7 +86,7 @@ func Test_printCert(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(
 		t,
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
 		ob.String(),
 	)
 	assert.Equal(t, "", eb.String())
@@ -115,7 +114,7 @@ func Test_printCert(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(
 		t,
"{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
 		ob.String(),
 	)
 	assert.Equal(t, "", eb.String())
@@ -1,11 +1,11 @@
 package main

 import (
+	"crypto/ecdh"
 	"crypto/rand"
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"strings"
@@ -49,7 +49,7 @@ func newSignFlags() *signFlags {

 }

-func signCert(args []string, out io.Writer, errOut io.Writer) error {
+func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
 	sf := newSignFlags()
 	err := sf.set.Parse(args)
 	if err != nil {
@@ -72,17 +72,46 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 		return newHelpErrorf("cannot set both -in-pub and -out-key")
 	}

-	rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath)
+	rawCAKey, err := os.ReadFile(*sf.caKeyPath)
 	if err != nil {
 		return fmt.Errorf("error while reading ca-key: %s", err)
 	}

-	caKey, _, err := cert.UnmarshalEd25519PrivateKey(rawCAKey)
-	if err != nil {
+	var curve cert.Curve
+	var caKey []byte
+
+	// naively attempt to decode the private key as though it is not encrypted
+	caKey, _, curve, err = cert.UnmarshalSigningPrivateKey(rawCAKey)
+	if err == cert.ErrPrivateKeyEncrypted {
+		// ask for a passphrase until we get one
+		var passphrase []byte
+		for i := 0; i < 5; i++ {
+			out.Write([]byte("Enter passphrase: "))
+			passphrase, err = pr.ReadPassword()
+
+			if err == ErrNoTerminal {
+				return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
+			} else if err != nil {
+				return fmt.Errorf("error reading password: %s", err)
+			}
+
+			if len(passphrase) > 0 {
+				break
+			}
+		}
+		if len(passphrase) == 0 {
+			return fmt.Errorf("cannot open encrypted ca-key without passphrase")
+		}
+
+		curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
+		if err != nil {
+			return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
+		}
+	} else if err != nil {
 		return fmt.Errorf("error while parsing ca-key: %s", err)
 	}

-	rawCACert, err := ioutil.ReadFile(*sf.caCertPath)
+	rawCACert, err := os.ReadFile(*sf.caCertPath)
 	if err != nil {
 		return fmt.Errorf("error while reading ca-crt: %s", err)
 	}
@@ -92,7 +121,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("error while parsing ca-crt: %s", err)
 	}

-	if err := caCert.VerifyPrivateKey(caKey); err != nil {
+	if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
 		return fmt.Errorf("refusing to sign, root certificate does not match private key")
 	}

@@ -148,16 +177,20 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {

 	var pub, rawPriv []byte
 	if *sf.inPubPath != "" {
-		rawPub, err := ioutil.ReadFile(*sf.inPubPath)
+		rawPub, err := os.ReadFile(*sf.inPubPath)
 		if err != nil {
 			return fmt.Errorf("error while reading in-pub: %s", err)
 		}
-		pub, _, err = cert.UnmarshalX25519PublicKey(rawPub)
+		var pubCurve cert.Curve
+		pub, _, pubCurve, err = cert.UnmarshalPublicKey(rawPub)
 		if err != nil {
 			return fmt.Errorf("error while parsing in-pub: %s", err)
 		}
+		if pubCurve != curve {
+			return fmt.Errorf("curve of in-pub does not match ca")
+		}
 	} else {
-		pub, rawPriv = x25519Keypair()
+		pub, rawPriv = newKeypair(curve)
 	}

 	nc := cert.NebulaCertificate{
@@ -171,6 +204,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 			PublicKey: pub,
 			IsCA:      false,
 			Issuer:    issuer,
+			Curve:     curve,
 		},
 	}

@@ -190,7 +224,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath)
 	}

-	err = nc.Sign(caKey)
+	err = nc.Sign(curve, caKey)
 	if err != nil {
 		return fmt.Errorf("error while signing: %s", err)
 	}
@@ -200,7 +234,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
 	}

-	err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600)
+	err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-key: %s", err)
 	}
@@ -211,7 +245,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("error while marshalling certificate: %s", err)
 	}

-	err = ioutil.WriteFile(*sf.outCertPath, b, 0600)
+	err = os.WriteFile(*sf.outCertPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-crt: %s", err)
 	}
@@ -222,7 +256,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("error while generating qr code: %s", err)
 	}

-	err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
+	err = os.WriteFile(*sf.outQRPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-qr: %s", err)
 	}
@@ -231,6 +265,17 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
 	return nil
 }

+func newKeypair(curve cert.Curve) ([]byte, []byte) {
+	switch curve {
+	case cert.Curve_CURVE25519:
+		return x25519Keypair()
+	case cert.Curve_P256:
+		return p256Keypair()
+	default:
+		return nil, nil
+	}
+}
+
 func x25519Keypair() ([]byte, []byte) {
 	privkey := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
@@ -245,6 +290,15 @@ func x25519Keypair() ([]byte, []byte) {
 	return pubkey, privkey
 }

+func p256Keypair() ([]byte, []byte) {
+	privkey, err := ecdh.P256().GenerateKey(rand.Reader)
+	if err != nil {
+		panic(err)
+	}
+	pubkey := privkey.PublicKey()
+	return pubkey.Bytes(), privkey.Bytes()
+}
+
 func signSummary() string {
 	return "sign <flags>: create and sign a certificate"
 }
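Side note (illustration, not part of the diff): the two generators dispatched by newKeypair produce different encodings, which is one reason the marshal and unmarshal helpers now take the curve alongside the raw bytes.

	// Illustration only: raw key sizes produced by the generators above.
	pub, priv := x25519Keypair() // len(pub) == 32, len(priv) == 32
	pub, priv = p256Keypair()    // len(pub) == 65 (uncompressed SEC1 point), len(priv) == 32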
@@ -6,7 +6,7 @@ package main
 import (
 	"bytes"
 	"crypto/rand"
-	"io/ioutil"
+	"errors"
 	"os"
 	"testing"
 	"time"
@@ -58,17 +58,39 @@ func Test_signCert(t *testing.T) {
 	ob := &bytes.Buffer{}
 	eb := &bytes.Buffer{}

+	nopw := &StubPasswordReader{
+		password: []byte(""),
+		err:      nil,
+	}
+
+	errpw := &StubPasswordReader{
+		password: []byte(""),
+		err:      errors.New("stub error"),
+	}
+
+	passphrase := []byte("DO NOT USE THIS KEY")
+	testpw := &StubPasswordReader{
+		password: passphrase,
+		err:      nil,
+	}
+
 	// required args
-	assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-name is required")
+	assertHelpError(t, signCert(
+		[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
+	), "-name is required")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

-	assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-ip is required")
+	assertHelpError(t, signCert(
+		[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
+	), "-ip is required")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// cannot set -in-pub and -out-key
-	assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb), "cannot set both -in-pub and -out-key")
+	assertHelpError(t, signCert(
+		[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
+	), "cannot set both -in-pub and -out-key")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -76,17 +98,17 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-key: open ./nope: "+NoSuchFileError)
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError)

 	// failed to unmarshal key
 	ob.Reset()
 	eb.Reset()
-	caKeyF, err := ioutil.TempFile("", "sign-cert.key")
+	caKeyF, err := os.CreateTemp("", "sign-cert.key")
 	assert.Nil(t, err)
 	defer os.Remove(caKeyF.Name())

 	args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-key: input did not contain a valid PEM encoded block")
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -98,19 +120,19 @@ func Test_signCert(t *testing.T) {

 	// failed to read cert
 	args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// failed to unmarshal cert
 	ob.Reset()
 	eb.Reset()
-	caCrtF, err := ioutil.TempFile("", "sign-cert.crt")
+	caCrtF, err := os.CreateTemp("", "sign-cert.crt")
 	assert.Nil(t, err)
 	defer os.Remove(caCrtF.Name())

 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -129,19 +151,19 @@ func Test_signCert(t *testing.T) {

 	// failed to read pub
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while reading in-pub: open ./nope: "+NoSuchFileError)
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError)
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// failed to unmarshal pub
 	ob.Reset()
 	eb.Reset()
-	inPubF, err := ioutil.TempFile("", "in.pub")
+	inPubF, err := os.CreateTemp("", "in.pub")
 	assert.Nil(t, err)
 	defer os.Remove(inPubF.Name())

 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while parsing in-pub: input did not contain a valid PEM encoded block")
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -155,14 +177,14 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
-	assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
+	assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
-	assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100")
+	assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -170,20 +192,20 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
-	assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: invalid CIDR address: a")
+	assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: invalid CIDR address: a")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
-	assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100")
+	assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// mismatched ca key
 	_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
-	caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
+	caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
 	assert.Nil(t, err)
 	defer os.Remove(caKeyF2.Name())
 	caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
@@ -191,7 +213,7 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
-	assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate does not match private key")
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -199,12 +221,12 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// create temp key file
-	keyF, err := ioutil.TempFile("", "test.key")
+	keyF, err := os.CreateTemp("", "test.key")
 	assert.Nil(t, err)
 	os.Remove(keyF.Name())

@@ -212,13 +234,13 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
-	assert.EqualError(t, signCert(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())
 	os.Remove(keyF.Name())

 	// create temp cert file
-	crtF, err := ioutil.TempFile("", "test.crt")
+	crtF, err := os.CreateTemp("", "test.crt")
 	assert.Nil(t, err)
 	os.Remove(crtF.Name())

@@ -226,18 +248,18 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
-	assert.Nil(t, signCert(args, ob, eb))
+	assert.Nil(t, signCert(args, ob, eb, nopw))
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// read cert and key files
-	rb, _ := ioutil.ReadFile(keyF.Name())
+	rb, _ := os.ReadFile(keyF.Name())
 	lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
 	assert.Len(t, lKey, 32)

-	rb, _ = ioutil.ReadFile(crtF.Name())
+	rb, _ = os.ReadFile(crtF.Name())
 	lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
@@ -268,12 +290,12 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
-	assert.Nil(t, signCert(args, ob, eb))
+	assert.Nil(t, signCert(args, ob, eb, nopw))
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

 	// read cert file and check pub key matches in-pub
-	rb, _ = ioutil.ReadFile(crtF.Name())
+	rb, _ = os.ReadFile(crtF.Name())
 	lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
@@ -283,7 +305,7 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
-	assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -291,14 +313,14 @@ func Test_signCert(t *testing.T) {
 	os.Remove(keyF.Name())
 	os.Remove(crtF.Name())
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
-	assert.Nil(t, signCert(args, ob, eb))
+	assert.Nil(t, signCert(args, ob, eb, nopw))

 	// test that we won't overwrite existing key file
 	os.Remove(crtF.Name())
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
-	assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing key: "+keyF.Name())
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name())
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

@@ -306,14 +328,83 @@ func Test_signCert(t *testing.T) {
 	os.Remove(keyF.Name())
 	os.Remove(crtF.Name())
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
-	assert.Nil(t, signCert(args, ob, eb))
+	assert.Nil(t, signCert(args, ob, eb, nopw))

 	// test that we won't overwrite existing certificate file
 	os.Remove(keyF.Name())
 	ob.Reset()
 	eb.Reset()
 	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
-	assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name())
+	assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name())
 	assert.Empty(t, ob.String())
 	assert.Empty(t, eb.String())

+	// create valid cert/key using encrypted CA key
+	os.Remove(caKeyF.Name())
+	os.Remove(caCrtF.Name())
+	os.Remove(keyF.Name())
+	os.Remove(crtF.Name())
+	ob.Reset()
+	eb.Reset()
+
+	caKeyF, err = os.CreateTemp("", "sign-cert.key")
+	assert.Nil(t, err)
+	defer os.Remove(caKeyF.Name())
+
+	caCrtF, err = os.CreateTemp("", "sign-cert.crt")
+	assert.Nil(t, err)
+	defer os.Remove(caCrtF.Name())
+
+	// generate the encrypted key
+	caPub, caPriv, _ = ed25519.GenerateKey(rand.Reader)
+	kdfParams := cert.NewArgon2Parameters(64*1024, 4, 3)
+	b, _ = cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, caPriv, passphrase, kdfParams)
+	caKeyF.Write(b)
+
+	ca = cert.NebulaCertificate{
+		Details: cert.NebulaCertificateDetails{
+			Name:      "ca",
+			NotBefore: time.Now(),
+			NotAfter:  time.Now().Add(time.Minute * 200),
+			PublicKey: caPub,
+			IsCA:      true,
+		},
+	}
+	b, _ = ca.MarshalToPEM()
+	caCrtF.Write(b)
+
+	// test with the proper password
+	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
+	assert.Nil(t, signCert(args, ob, eb, testpw))
+	assert.Equal(t, "Enter passphrase: ", ob.String())
+	assert.Empty(t, eb.String())
+
+	// test with the wrong password
+	ob.Reset()
+	eb.Reset()
+
+	testpw.password = []byte("invalid password")
+	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
+	assert.Error(t, signCert(args, ob, eb, testpw))
+	assert.Equal(t, "Enter passphrase: ", ob.String())
+	assert.Empty(t, eb.String())
+
+	// test with the user not entering a password
+	ob.Reset()
+	eb.Reset()
+
+	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
+	assert.Error(t, signCert(args, ob, eb, nopw))
+	// normally the user hitting enter on the prompt would add newlines between these
+	assert.Equal(t, "Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: ", ob.String())
+	assert.Empty(t, eb.String())
+
+	// test an error condition
+	ob.Reset()
+	eb.Reset()
+
+	args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
+	assert.Error(t, signCert(args, ob, eb, errpw))
+	assert.Equal(t, "Enter passphrase: ", ob.String())
+	assert.Empty(t, eb.String())
 }
@@ -4,7 +4,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"
 	"time"
@@ -40,7 +39,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
 		return err
 	}

-	rawCACert, err := ioutil.ReadFile(*vf.caPath)
+	rawCACert, err := os.ReadFile(*vf.caPath)
 	if err != nil {
 		return fmt.Errorf("error while reading ca: %s", err)
 	}
@@ -57,7 +56,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
 		}
 	}

-	rawCert, err := ioutil.ReadFile(*vf.certPath)
+	rawCert, err := os.ReadFile(*vf.certPath)
 	if err != nil {
 		return fmt.Errorf("unable to read crt; %s", err)
 	}
@@ -3,7 +3,6 @@ package main
 import (
 	"bytes"
 	"crypto/rand"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -56,7 +55,7 @@ func Test_verify(t *testing.T) {
 	// invalid ca at path
 	ob.Reset()
 	eb.Reset()
-	caFile, err := ioutil.TempFile("", "verify-ca")
+	caFile, err := os.CreateTemp("", "verify-ca")
 	assert.Nil(t, err)
 	defer os.Remove(caFile.Name())

@@ -77,7 +76,7 @@ func Test_verify(t *testing.T) {
 			IsCA: true,
 		},
 	}
-	ca.Sign(caPriv)
+	ca.Sign(cert.Curve_CURVE25519, caPriv)
 	b, _ := ca.MarshalToPEM()
 	caFile.Truncate(0)
 	caFile.Seek(0, 0)
@@ -92,7 +91,7 @@ func Test_verify(t *testing.T) {
 	// invalid crt at path
 	ob.Reset()
 	eb.Reset()
-	certFile, err := ioutil.TempFile("", "verify-cert")
+	certFile, err := os.CreateTemp("", "verify-cert")
 	assert.Nil(t, err)
 	defer os.Remove(certFile.Name())

@@ -117,7 +116,7 @@ func Test_verify(t *testing.T) {
 		},
 	}

-	crt.Sign(badPriv)
+	crt.Sign(cert.Curve_CURVE25519, badPriv)
 	b, _ = crt.MarshalToPEM()
 	certFile.Truncate(0)
 	certFile.Seek(0, 0)
@@ -129,7 +128,7 @@ func Test_verify(t *testing.T) {
 	assert.EqualError(t, err, "certificate signature did not match")

 	// verified cert at path
-	crt.Sign(caPriv)
+	crt.Sign(cert.Curve_CURVE25519, caPriv)
 	b, _ = crt.MarshalToPEM()
 	certFile.Truncate(0)
 	certFile.Seek(0, 0)
@@ -13,7 +13,7 @@ import (
 
 // A version string that can be set with
 //
 // -ldflags "-X main.Build=SOMEVERSION"
 //
 // at compile-time.
 var Build string
@@ -59,13 +59,8 @@ func main() {
 	}
 
 	ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
-
-	switch v := err.(type) {
-	case util.ContextualError:
-		v.Log(l)
-		os.Exit(1)
-	case error:
-		l.WithError(err).Error("Failed to start")
+	if err != nil {
+		util.LogWithContextIfNeeded("Failed to start", err, l)
 		os.Exit(1)
 	}
 

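The type switch is collapsed into a single util.LogWithContextIfNeeded call. The helper itself is not part of this hunk; a minimal sketch of the idea, with assumed names for the contextual error type and its fields, might look like this:

package util

import "github.com/sirupsen/logrus"

// ContextualError is an assumed error type that carries a message and
// structured fields alongside the underlying error.
type ContextualError struct {
	Context   string
	Fields    map[string]interface{}
	RealError error
}

func (ce ContextualError) Error() string { return ce.Context + ": " + ce.RealError.Error() }

// LogWithContextIfNeeded logs the error with its own context and fields when it
// is a ContextualError, and falls back to a plain error log otherwise.
func LogWithContextIfNeeded(msg string, err error, l *logrus.Logger) {
	switch v := err.(type) {
	case ContextualError:
		l.WithFields(v.Fields).WithError(v.RealError).Error(v.Context)
	default:
		l.WithError(err).Error(msg)
	}
}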
@@ -49,6 +49,14 @@ func (p *program) Stop(s service.Service) error {
 	return nil
 }
 
+func fileExists(filename string) bool {
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
+
 func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
 	if *configPath == "" {
 		ex, err := os.Executable()
@@ -56,6 +64,9 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
 			panic(err)
 		}
 		*configPath = filepath.Dir(ex) + "/config.yaml"
+		if !fileExists(*configPath) {
+			*configPath = filepath.Dir(ex) + "/config.yml"
+		}
 	}
 
 	svcConfig := &service.Config{

@@ -13,7 +13,7 @@ import (
 
 // A version string that can be set with
 //
 // -ldflags "-X main.Build=SOMEVERSION"
 //
 // at compile-time.
 var Build string
@@ -53,18 +53,14 @@ func main() {
 	}
 
 	ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
-
-	switch v := err.(type) {
-	case util.ContextualError:
-		v.Log(l)
-		os.Exit(1)
-	case error:
-		l.WithError(err).Error("Failed to start")
+	if err != nil {
+		util.LogWithContextIfNeeded("Failed to start", err, l)
 		os.Exit(1)
 	}
 
 	if !*configTest {
 		ctrl.Start()
+		notifyReady(l)
 		ctrl.ShutdownBlock()
 	}
 
cmd/nebula/notify_linux.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"net"
+	"os"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// SdNotifyReady tells systemd the service is ready and dependent services can now be started
+// https://www.freedesktop.org/software/systemd/man/sd_notify.html
+// https://www.freedesktop.org/software/systemd/man/systemd.service.html
+const SdNotifyReady = "READY=1"
+
+func notifyReady(l *logrus.Logger) {
+	sockName := os.Getenv("NOTIFY_SOCKET")
+	if sockName == "" {
+		l.Debugln("NOTIFY_SOCKET systemd env var not set, not sending ready signal")
+		return
+	}
+
+	conn, err := net.DialTimeout("unixgram", sockName, time.Second)
+	if err != nil {
+		l.WithError(err).Error("failed to connect to systemd notification socket")
+		return
+	}
+	defer conn.Close()
+
+	err = conn.SetWriteDeadline(time.Now().Add(time.Second))
+	if err != nil {
+		l.WithError(err).Error("failed to set the write deadline for the systemd notification socket")
+		return
+	}
+
+	if _, err = conn.Write([]byte(SdNotifyReady)); err != nil {
+		l.WithError(err).Error("failed to signal the systemd notification socket")
+		return
+	}
+
+	l.Debugln("notified systemd the service is ready")
+}
cmd/nebula/notify_notlinux.go (new file, 10 lines)
@@ -0,0 +1,10 @@
+//go:build !linux
+// +build !linux
+
+package main
+
+import "github.com/sirupsen/logrus"
+
+func notifyReady(_ *logrus.Logger) {
+	// No init service to notify
+}
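notifyReady speaks the sd_notify protocol: a single datagram written to the unix socket whose path systemd publishes in NOTIFY_SOCKET. A standalone sketch (not part of this change) that fakes the receiving side so the message can be observed locally; the socket path is made up for the example:

package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
	"time"
)

func main() {
	// Fake the systemd side: a listening unixgram socket at a temp path.
	sock := filepath.Join(os.TempDir(), "notify-demo.sock")
	defer os.Remove(sock)

	ln, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// This is the env var a Type=notify unit would see at startup.
	os.Setenv("NOTIFY_SOCKET", sock)

	// Send READY=1 the same way notifyReady does.
	conn, err := net.DialTimeout("unixgram", os.Getenv("NOTIFY_SOCKET"), time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if _, err := conn.Write([]byte("READY=1")); err != nil {
		panic(err)
	}

	// Read the datagram back, as systemd would.
	buf := make([]byte, 64)
	ln.SetReadDeadline(time.Now().Add(time.Second))
	n, _, err := ln.ReadFromUnix(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %q\n", buf[:n]) // received "READY=1"
}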
@@ -4,7 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"math"
 	"os"
 	"os/signal"
 	"path/filepath"
@@ -15,7 +15,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/imdario/mergo"
+	"dario.cat/mergo"
 	"github.com/sirupsen/logrus"
 	"gopkg.in/yaml.v2"
 )
@@ -121,6 +121,10 @@ func (c *C) HasChanged(k string) bool {
 // CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
 // original path provided to Load. The old settings are shallow copied for change detection after the reload.
 func (c *C) CatchHUP(ctx context.Context) {
+	if c.path == "" {
+		return
+	}
+
 	ch := make(chan os.Signal, 1)
 	signal.Notify(ch, syscall.SIGHUP)
 
@@ -236,6 +240,15 @@ func (c *C) GetInt(k string, d int) int {
 	return v
 }
 
+// GetUint32 will get the uint32 for k or return the default d if not found or invalid
+func (c *C) GetUint32(k string, d uint32) uint32 {
+	r := c.GetInt(k, int(d))
+	if uint64(r) > uint64(math.MaxUint32) {
+		return d
+	}
+	return uint32(r)
+}
+
 // GetBool will get the bool for k or return the default d if not found or invalid
 func (c *C) GetBool(k string, d bool) bool {
 	r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
@@ -348,7 +361,7 @@ func (c *C) parse() error {
 	var m map[interface{}]interface{}
 
 	for _, path := range c.files {
-		b, err := ioutil.ReadFile(path)
+		b, err := os.ReadFile(path)
 		if err != nil {
 			return err
 		}

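For reference, a minimal sketch of the new GetUint32 helper in use; the temp-dir setup mirrors the tests below and the key names are only examples:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	dir, err := os.MkdirTemp("", "config-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("listen:\n  port: 4242\n"), 0644)

	c := config.NewC(logrus.New())
	if err := c.Load(dir); err != nil {
		panic(err)
	}

	// Falls back to the default when the key is missing, not an integer,
	// or does not fit in a uint32.
	fmt.Println(c.GetUint32("listen.port", 1))    // 4242
	fmt.Println(c.GetUint32("listen.missing", 7)) // 7
}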
@@ -1,22 +1,24 @@
 package config
 
 import (
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"testing"
 	"time"
 
+	"dario.cat/mergo"
 	"github.com/slackhq/nebula/test"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v2"
 )
 
 func TestConfig_Load(t *testing.T) {
 	l := test.NewLogger()
-	dir, err := ioutil.TempDir("", "config-test")
+	dir, err := os.MkdirTemp("", "config-test")
 	// invalid yaml
 	c := NewC(l)
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
 	assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
 
 	// simple multi config merge
@@ -26,8 +28,8 @@ func TestConfig_Load(t *testing.T) {
 
 	assert.Nil(t, err)
 
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
-	ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
+	os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
 	assert.Nil(t, c.Load(dir))
 	expected := map[interface{}]interface{}{
 		"outer": map[interface{}]interface{}{
@@ -117,9 +119,9 @@ func TestConfig_HasChanged(t *testing.T) {
 func TestConfig_ReloadConfig(t *testing.T) {
 	l := test.NewLogger()
 	done := make(chan bool, 1)
-	dir, err := ioutil.TempDir("", "config-test")
+	dir, err := os.MkdirTemp("", "config-test")
 	assert.Nil(t, err)
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
 
 	c := NewC(l)
 	assert.Nil(t, c.Load(dir))
@@ -128,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
 	assert.False(t, c.HasChanged("outer"))
 	assert.False(t, c.HasChanged(""))
 
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
 
 	c.RegisterReloadCallback(func(c *C) {
 		done <- true
@@ -147,3 +149,77 @@ func TestConfig_ReloadConfig(t *testing.T) {
 	}
 
 }
+
+// Ensure mergo merges are done the way we expect.
+// This is needed to test for potential regressions, like:
+// - https://github.com/imdario/mergo/issues/187
+func TestConfig_MergoMerge(t *testing.T) {
+	configs := [][]byte{
+		[]byte(`
+listen:
+  port: 1234
+`),
+		[]byte(`
+firewall:
+  inbound:
+    - port: 443
+      proto: tcp
+      groups:
+        - server
+    - port: 443
+      proto: tcp
+      groups:
+        - webapp
+`),
+		[]byte(`
+listen:
+  host: 0.0.0.0
+  port: 4242
+firewall:
+  outbound:
+    - port: any
+      proto: any
+      host: any
+  inbound:
+    - port: any
+      proto: icmp
+      host: any
+`),
+	}
+
+	var m map[any]any
+
+	// merge the same way config.parse() merges
+	for _, b := range configs {
+		var nm map[any]any
+		err := yaml.Unmarshal(b, &nm)
+		require.NoError(t, err)
+
+		// We need to use WithAppendSlice so that firewall rules in separate
+		// files are appended together
+		err = mergo.Merge(&nm, m, mergo.WithAppendSlice)
+		m = nm
+		require.NoError(t, err)
+	}
+
+	t.Logf("Merged Config: %#v", m)
+	mYaml, err := yaml.Marshal(m)
+	require.NoError(t, err)
+	t.Logf("Merged Config as YAML:\n%s", mYaml)
+
+	// If a bug is present, some items might be replaced instead of merged like we expect
+	expected := map[any]any{
+		"firewall": map[any]any{
+			"inbound": []any{
+				map[any]any{"host": "any", "port": "any", "proto": "icmp"},
+				map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
+				map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
+			"outbound": []any{
+				map[any]any{"host": "any", "port": "any", "proto": "any"}}},
+		"listen": map[any]any{
+			"host": "0.0.0.0",
+			"port": 4242,
+		},
+	}
+	assert.Equal(t, expected, m)
+}
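TestConfig_MergoMerge pins down the merge semantics the loader relies on. A tiny standalone illustration of the same WithAppendSlice call (note the direction: the previously accumulated map is merged into the newly parsed one); the maps here are invented for the example:

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// Previously accumulated settings (earlier config files).
	m := map[string]any{
		"firewall": map[string]any{
			"inbound": []any{map[string]any{"port": 443, "proto": "tcp"}},
		},
	}
	// Newly parsed file.
	nm := map[string]any{
		"firewall": map[string]any{
			"inbound": []any{map[string]any{"port": "any", "proto": "icmp"}},
		},
	}

	// Same call config.parse makes: merge the accumulated map into the new one,
	// appending slices instead of replacing them.
	if err := mergo.Merge(&nm, m, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	// "inbound" should now hold both rules, which is the property the test asserts.
	fmt.Printf("%#v\n", nm["firewall"])
}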
@@ -1,150 +1,155 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
|
type trafficDecision int
|
||||||
// and something like every 10 packets we could lock, send 10, then unlock for a moment
|
|
||||||
|
const (
|
||||||
|
doNothing trafficDecision = 0
|
||||||
|
deleteTunnel trafficDecision = 1 // delete the hostinfo on our side, do not notify the remote
|
||||||
|
closeTunnel trafficDecision = 2 // delete the hostinfo and notify the remote
|
||||||
|
swapPrimary trafficDecision = 3
|
||||||
|
migrateRelays trafficDecision = 4
|
||||||
|
tryRehandshake trafficDecision = 5
|
||||||
|
sendTestPacket trafficDecision = 6
|
||||||
|
)
|
||||||
|
|
||||||
type connectionManager struct {
|
type connectionManager struct {
|
||||||
hostMap *HostMap
|
in map[uint32]struct{}
|
||||||
in map[iputil.VpnIp]struct{}
|
inLock *sync.RWMutex
|
||||||
inLock *sync.RWMutex
|
|
||||||
inCount int
|
|
||||||
out map[iputil.VpnIp]struct{}
|
|
||||||
outLock *sync.RWMutex
|
|
||||||
outCount int
|
|
||||||
TrafficTimer *SystemTimerWheel
|
|
||||||
intf *Interface
|
|
||||||
|
|
||||||
pendingDeletion map[iputil.VpnIp]int
|
out map[uint32]struct{}
|
||||||
pendingDeletionLock *sync.RWMutex
|
outLock *sync.RWMutex
|
||||||
pendingDeletionTimer *SystemTimerWheel
|
|
||||||
|
|
||||||
checkInterval int
|
// relayUsed holds which relay localIndexs are in use
|
||||||
pendingDeletionInterval int
|
relayUsed map[uint32]struct{}
|
||||||
|
relayUsedLock *sync.RWMutex
|
||||||
|
|
||||||
|
hostMap *HostMap
|
||||||
|
trafficTimer *LockingTimerWheel[uint32]
|
||||||
|
intf *Interface
|
||||||
|
pendingDeletion map[uint32]struct{}
|
||||||
|
punchy *Punchy
|
||||||
|
checkInterval time.Duration
|
||||||
|
pendingDeletionInterval time.Duration
|
||||||
|
metricsTxPunchy metrics.Counter
|
||||||
|
|
||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
// I wanted to call one matLock
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
|
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
|
||||||
|
var max time.Duration
|
||||||
|
if checkInterval < pendingDeletionInterval {
|
||||||
|
max = pendingDeletionInterval
|
||||||
|
} else {
|
||||||
|
max = checkInterval
|
||||||
|
}
|
||||||
|
|
||||||
nc := &connectionManager{
|
nc := &connectionManager{
|
||||||
hostMap: intf.hostMap,
|
hostMap: intf.hostMap,
|
||||||
in: make(map[iputil.VpnIp]struct{}),
|
in: make(map[uint32]struct{}),
|
||||||
inLock: &sync.RWMutex{},
|
inLock: &sync.RWMutex{},
|
||||||
inCount: 0,
|
out: make(map[uint32]struct{}),
|
||||||
out: make(map[iputil.VpnIp]struct{}),
|
|
||||||
outLock: &sync.RWMutex{},
|
outLock: &sync.RWMutex{},
|
||||||
outCount: 0,
|
relayUsed: make(map[uint32]struct{}),
|
||||||
TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
relayUsedLock: &sync.RWMutex{},
|
||||||
|
trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
|
||||||
intf: intf,
|
intf: intf,
|
||||||
pendingDeletion: make(map[iputil.VpnIp]int),
|
pendingDeletion: make(map[uint32]struct{}),
|
||||||
pendingDeletionLock: &sync.RWMutex{},
|
|
||||||
pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
|
||||||
checkInterval: checkInterval,
|
checkInterval: checkInterval,
|
||||||
pendingDeletionInterval: pendingDeletionInterval,
|
pendingDeletionInterval: pendingDeletionInterval,
|
||||||
|
punchy: punchy,
|
||||||
|
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
|
||||||
l: l,
|
l: l,
|
||||||
}
|
}
|
||||||
|
|
||||||
nc.Start(ctx)
|
nc.Start(ctx)
|
||||||
return nc
|
return nc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) In(ip iputil.VpnIp) {
|
func (n *connectionManager) In(localIndex uint32) {
|
||||||
n.inLock.RLock()
|
n.inLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.in[ip]; ok {
|
if _, ok := n.in[localIndex]; ok {
|
||||||
n.inLock.RUnlock()
|
n.inLock.RUnlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.inLock.RUnlock()
|
n.inLock.RUnlock()
|
||||||
n.inLock.Lock()
|
n.inLock.Lock()
|
||||||
n.in[ip] = struct{}{}
|
n.in[localIndex] = struct{}{}
|
||||||
n.inLock.Unlock()
|
n.inLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Out(ip iputil.VpnIp) {
|
func (n *connectionManager) Out(localIndex uint32) {
|
||||||
n.outLock.RLock()
|
n.outLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.out[ip]; ok {
|
if _, ok := n.out[localIndex]; ok {
|
||||||
n.outLock.RUnlock()
|
n.outLock.RUnlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.outLock.RUnlock()
|
n.outLock.RUnlock()
|
||||||
n.outLock.Lock()
|
n.outLock.Lock()
|
||||||
// double check since we dropped the lock temporarily
|
n.out[localIndex] = struct{}{}
|
||||||
if _, ok := n.out[ip]; ok {
|
n.outLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) RelayUsed(localIndex uint32) {
|
||||||
|
n.relayUsedLock.RLock()
|
||||||
|
// If this already exists, return
|
||||||
|
if _, ok := n.relayUsed[localIndex]; ok {
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
n.relayUsedLock.Lock()
|
||||||
|
n.relayUsed[localIndex] = struct{}{}
|
||||||
|
n.relayUsedLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
|
||||||
|
// resets the state for this local index
|
||||||
|
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
|
||||||
|
n.inLock.Lock()
|
||||||
|
n.outLock.Lock()
|
||||||
|
_, in := n.in[localIndex]
|
||||||
|
_, out := n.out[localIndex]
|
||||||
|
delete(n.in, localIndex)
|
||||||
|
delete(n.out, localIndex)
|
||||||
|
n.inLock.Unlock()
|
||||||
|
n.outLock.Unlock()
|
||||||
|
return in, out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
|
||||||
|
// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
|
||||||
|
n.outLock.Lock()
|
||||||
|
if _, ok := n.out[localIndex]; ok {
|
||||||
n.outLock.Unlock()
|
n.outLock.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.out[ip] = struct{}{}
|
n.out[localIndex] = struct{}{}
|
||||||
n.AddTrafficWatch(ip, n.checkInterval)
|
n.trafficTimer.Add(localIndex, n.checkInterval)
|
||||||
n.outLock.Unlock()
|
n.outLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool {
|
|
||||||
n.inLock.RLock()
|
|
||||||
if _, ok := n.in[vpnIp]; ok {
|
|
||||||
n.inLock.RUnlock()
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
n.inLock.RUnlock()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) ClearIP(ip iputil.VpnIp) {
|
|
||||||
n.inLock.Lock()
|
|
||||||
n.outLock.Lock()
|
|
||||||
delete(n.in, ip)
|
|
||||||
delete(n.out, ip)
|
|
||||||
n.inLock.Unlock()
|
|
||||||
n.outLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) {
|
|
||||||
n.pendingDeletionLock.Lock()
|
|
||||||
delete(n.pendingDeletion, ip)
|
|
||||||
n.pendingDeletionLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) AddPendingDeletion(ip iputil.VpnIp) {
|
|
||||||
n.pendingDeletionLock.Lock()
|
|
||||||
if _, ok := n.pendingDeletion[ip]; ok {
|
|
||||||
n.pendingDeletion[ip] += 1
|
|
||||||
} else {
|
|
||||||
n.pendingDeletion[ip] = 0
|
|
||||||
}
|
|
||||||
n.pendingDeletionTimer.Add(ip, time.Second*time.Duration(n.pendingDeletionInterval))
|
|
||||||
n.pendingDeletionLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool {
|
|
||||||
n.pendingDeletionLock.RLock()
|
|
||||||
if _, ok := n.pendingDeletion[ip]; ok {
|
|
||||||
|
|
||||||
n.pendingDeletionLock.RUnlock()
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
n.pendingDeletionLock.RUnlock()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) {
|
|
||||||
n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) Start(ctx context.Context) {
|
func (n *connectionManager) Start(ctx context.Context) {
|
||||||
go n.Run(ctx)
|
go n.Run(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Run(ctx context.Context) {
|
func (n *connectionManager) Run(ctx context.Context) {
|
||||||
|
//TODO: this tick should be based on the min wheel tick? Check firewall
|
||||||
clockSource := time.NewTicker(500 * time.Millisecond)
|
clockSource := time.NewTicker(500 * time.Millisecond)
|
||||||
defer clockSource.Stop()
|
defer clockSource.Stop()
|
||||||
|
|
||||||
@@ -156,154 +161,322 @@ func (n *connectionManager) Run(ctx context.Context) {
|
|||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
|
|
||||||
case now := <-clockSource.C:
|
case now := <-clockSource.C:
|
||||||
n.HandleMonitorTick(now, p, nb, out)
|
n.trafficTimer.Advance(now)
|
||||||
n.HandleDeletionTick(now)
|
for {
|
||||||
|
localIndex, has := n.trafficTimer.Purge()
|
||||||
|
if !has {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
n.doTrafficCheck(localIndex, p, nb, out, now)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) {
|
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
||||||
n.TrafficTimer.advance(now)
|
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
|
||||||
for {
|
|
||||||
ep := n.TrafficTimer.Purge()
|
switch decision {
|
||||||
if ep == nil {
|
case deleteTunnel:
|
||||||
break
|
if n.hostMap.DeleteHostInfo(hostinfo) {
|
||||||
|
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
|
||||||
|
n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ep.(iputil.VpnIp)
|
case closeTunnel:
|
||||||
|
n.intf.sendCloseTunnel(hostinfo)
|
||||||
|
n.intf.closeTunnel(hostinfo)
|
||||||
|
|
||||||
// Check for traffic coming back in from this host.
|
case swapPrimary:
|
||||||
traf := n.CheckIn(vpnIp)
|
n.swapPrimary(hostinfo, primary)
|
||||||
|
|
||||||
hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
|
case migrateRelays:
|
||||||
if err != nil {
|
n.migrateRelayUsed(hostinfo, primary)
|
||||||
n.l.Debugf("Not found in hostmap: %s", vpnIp)
|
|
||||||
n.ClearIP(vpnIp)
|
case tryRehandshake:
|
||||||
n.ClearPendingDeletion(vpnIp)
|
n.tryRehandshake(hostinfo)
|
||||||
|
|
||||||
|
case sendTestPacket:
|
||||||
|
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
n.resetRelayTrafficCheck(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
|
||||||
|
if hostinfo != nil {
|
||||||
|
n.relayUsedLock.Lock()
|
||||||
|
defer n.relayUsedLock.Unlock()
|
||||||
|
// No need to migrate any relays, delete usage info now.
|
||||||
|
for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
|
||||||
|
delete(n.relayUsed, idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
|
||||||
|
relayFor := oldhostinfo.relayState.CopyAllRelayFor()
|
||||||
|
|
||||||
|
for _, r := range relayFor {
|
||||||
|
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
|
||||||
|
|
||||||
|
var index uint32
|
||||||
|
var relayFrom iputil.VpnIp
|
||||||
|
var relayTo iputil.VpnIp
|
||||||
|
switch {
|
||||||
|
case ok && existing.State == Established:
|
||||||
|
// This relay already exists in newhostinfo, then do nothing.
|
||||||
continue
|
continue
|
||||||
}
|
case ok && existing.State == Requested:
|
||||||
|
// The relay exists in a Requested state; re-send the request
|
||||||
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
|
index = existing.LocalIndex
|
||||||
continue
|
switch r.Type {
|
||||||
}
|
case TerminalType:
|
||||||
|
relayFrom = n.intf.myVpnIp
|
||||||
// If we saw an incoming packets from this ip and peer's certificate is not
|
relayTo = existing.PeerIp
|
||||||
// expired, just ignore.
|
case ForwardingType:
|
||||||
if traf {
|
relayFrom = existing.PeerIp
|
||||||
if n.l.Level >= logrus.DebugLevel {
|
relayTo = newhostinfo.vpnIp
|
||||||
n.l.WithField("vpnIp", vpnIp).
|
default:
|
||||||
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
// should never happen
|
||||||
Debug("Tunnel status")
|
}
|
||||||
|
case !ok:
|
||||||
|
n.relayUsedLock.RLock()
|
||||||
|
if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
|
||||||
|
// The relay hasn't been used; don't migrate it.
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
// The relay doesn't exist at all; create some relay state and send the request.
|
||||||
|
var err error
|
||||||
|
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
|
||||||
|
if err != nil {
|
||||||
|
n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch r.Type {
|
||||||
|
case TerminalType:
|
||||||
|
relayFrom = n.intf.myVpnIp
|
||||||
|
relayTo = r.PeerIp
|
||||||
|
case ForwardingType:
|
||||||
|
relayFrom = r.PeerIp
|
||||||
|
relayTo = newhostinfo.vpnIp
|
||||||
|
default:
|
||||||
|
// should never happen
|
||||||
}
|
}
|
||||||
n.ClearIP(vpnIp)
|
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.logger(n.l).
|
// Send a CreateRelayRequest to the peer.
|
||||||
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
req := NebulaControl{
|
||||||
Debug("Tunnel status")
|
Type: NebulaControl_CreateRelayRequest,
|
||||||
|
InitiatorRelayIndex: index,
|
||||||
if hostinfo != nil && hostinfo.ConnectionState != nil {
|
RelayFromIp: uint32(relayFrom),
|
||||||
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
RelayToIp: uint32(relayTo),
|
||||||
n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out)
|
}
|
||||||
|
msg, err := req.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
|
||||||
} else {
|
} else {
|
||||||
hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp)
|
n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||||
|
n.l.WithFields(logrus.Fields{
|
||||||
|
"relayFrom": iputil.VpnIp(req.RelayFromIp),
|
||||||
|
"relayTo": iputil.VpnIp(req.RelayToIp),
|
||||||
|
"initiatorRelayIndex": req.InitiatorRelayIndex,
|
||||||
|
"responderRelayIndex": req.ResponderRelayIndex,
|
||||||
|
"vpnIp": newhostinfo.vpnIp}).
|
||||||
|
Info("send CreateRelayRequest")
|
||||||
}
|
}
|
||||||
n.AddPendingDeletion(vpnIp)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) HandleDeletionTick(now time.Time) {
|
func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
|
||||||
n.pendingDeletionTimer.advance(now)
|
n.hostMap.RLock()
|
||||||
for {
|
defer n.hostMap.RUnlock()
|
||||||
ep := n.pendingDeletionTimer.Purge()
|
|
||||||
if ep == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
vpnIp := ep.(iputil.VpnIp)
|
hostinfo := n.hostMap.Indexes[localIndex]
|
||||||
|
if hostinfo == nil {
|
||||||
|
n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
|
||||||
|
delete(n.pendingDeletion, localIndex)
|
||||||
|
return doNothing, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
|
if n.isInvalidCertificate(now, hostinfo) {
|
||||||
if err != nil {
|
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||||
n.l.Debugf("Not found in hostmap: %s", vpnIp)
|
return closeTunnel, hostinfo, nil
|
||||||
n.ClearIP(vpnIp)
|
}
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
|
primary := n.hostMap.Hosts[hostinfo.vpnIp]
|
||||||
continue
|
mainHostInfo := true
|
||||||
}
|
if primary != nil && primary != hostinfo {
|
||||||
|
mainHostInfo = false
|
||||||
|
}
|
||||||
|
|
||||||
// If we saw an incoming packets from this ip and peer's certificate is not
|
// Check for traffic on this hostinfo
|
||||||
// expired, just ignore.
|
inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
|
||||||
traf := n.CheckIn(vpnIp)
|
|
||||||
if traf {
|
|
||||||
n.l.WithField("vpnIp", vpnIp).
|
|
||||||
WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
|
|
||||||
Debug("Tunnel status")
|
|
||||||
|
|
||||||
n.ClearIP(vpnIp)
|
// A hostinfo is determined alive if there is incoming traffic
|
||||||
n.ClearPendingDeletion(vpnIp)
|
if inTraffic {
|
||||||
continue
|
decision := doNothing
|
||||||
}
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
|
|
||||||
// If it comes around on deletion wheel and hasn't resolved itself, delete
|
|
||||||
if n.checkPendingDeletion(vpnIp) {
|
|
||||||
cn := ""
|
|
||||||
if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
|
|
||||||
cn = hostinfo.ConnectionState.peerCert.Details.Name
|
|
||||||
}
|
|
||||||
hostinfo.logger(n.l).
|
hostinfo.logger(n.l).
|
||||||
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
||||||
WithField("certName", cn).
|
Debug("Tunnel status")
|
||||||
Info("Tunnel status")
|
}
|
||||||
|
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
|
||||||
|
if mainHostInfo {
|
||||||
|
decision = tryRehandshake
|
||||||
|
|
||||||
n.ClearIP(vpnIp)
|
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
// TODO: This is only here to let tests work. Should do proper mocking
|
|
||||||
if n.intf.lightHouse != nil {
|
|
||||||
n.intf.lightHouse.DeleteVpnIp(vpnIp)
|
|
||||||
}
|
|
||||||
n.hostMap.DeleteHostInfo(hostinfo)
|
|
||||||
} else {
|
} else {
|
||||||
n.ClearIP(vpnIp)
|
if n.shouldSwapPrimary(hostinfo, primary) {
|
||||||
n.ClearPendingDeletion(vpnIp)
|
decision = swapPrimary
|
||||||
|
} else {
|
||||||
|
// migrate the relays to the primary, if in use.
|
||||||
|
decision = migrateRelays
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
|
||||||
|
|
||||||
|
if !outTraffic {
|
||||||
|
// Send a punch packet to keep the NAT state alive
|
||||||
|
n.sendPunch(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
return decision, hostinfo, primary
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
|
||||||
|
// We have already sent a test packet and nothing was returned, this hostinfo is dead
|
||||||
|
hostinfo.logger(n.l).
|
||||||
|
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
||||||
|
Info("Tunnel status")
|
||||||
|
|
||||||
|
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
return deleteTunnel, hostinfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
decision := doNothing
|
||||||
|
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
|
||||||
|
if !outTraffic {
|
||||||
|
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
|
||||||
|
// Just maintain NAT state if configured to do so.
|
||||||
|
n.sendPunch(hostinfo)
|
||||||
|
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
|
||||||
|
return doNothing, nil, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.punchy.GetTargetEverything() {
|
||||||
|
// This is similar to the old punchy behavior with a slight optimization.
|
||||||
|
// We aren't receiving traffic but we are sending it, punch on all known
|
||||||
|
// ips in case we need to re-prime NAT state
|
||||||
|
n.sendPunch(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
|
hostinfo.logger(n.l).
|
||||||
|
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
||||||
|
Debug("Tunnel status")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
||||||
|
decision = sendTestPacket
|
||||||
|
|
||||||
|
} else {
|
||||||
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
|
hostinfo.logger(n.l).Debugf("Hostinfo sadness")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
|
||||||
|
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
|
||||||
|
return decision, hostinfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleInvalidCertificates will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid
|
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
|
||||||
func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool {
|
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
|
||||||
if !n.intf.disconnectInvalid {
|
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
|
||||||
|
// Let's sort this out.
|
||||||
|
|
||||||
|
if current.vpnIp < n.intf.myVpnIp {
|
||||||
|
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
|
||||||
|
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
|
||||||
|
// The remotes vpn ip is lower than mine. I will not flip.
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
certState := n.intf.pki.GetCertState()
|
||||||
|
return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
|
||||||
|
n.hostMap.Lock()
|
||||||
|
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
|
||||||
|
if n.hostMap.Hosts[current.vpnIp] == primary {
|
||||||
|
n.hostMap.unlockedMakePrimary(current)
|
||||||
|
}
|
||||||
|
n.hostMap.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
|
||||||
|
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
|
||||||
|
// check and return true.
|
||||||
|
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
|
||||||
remoteCert := hostinfo.GetCert()
|
remoteCert := hostinfo.GetCert()
|
||||||
if remoteCert == nil {
|
if remoteCert == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
valid, err := remoteCert.Verify(now, n.intf.caPool)
|
valid, err := remoteCert.VerifyWithCache(now, n.intf.pki.GetCAPool())
|
||||||
if valid {
|
if valid {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
|
||||||
|
// Block listed certificates should always be disconnected
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
fingerprint, _ := remoteCert.Sha256Sum()
|
fingerprint, _ := remoteCert.Sha256Sum()
|
||||||
n.l.WithField("vpnIp", vpnIp).WithError(err).
|
hostinfo.logger(n.l).WithError(err).
|
||||||
WithField("certName", remoteCert.Details.Name).
|
|
||||||
WithField("fingerprint", fingerprint).
|
WithField("fingerprint", fingerprint).
|
||||||
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
||||||
|
|
||||||
// Inform the remote and close the tunnel locally
|
|
||||||
n.intf.sendCloseTunnel(hostinfo)
|
|
||||||
n.intf.closeTunnel(hostinfo)
|
|
||||||
|
|
||||||
n.ClearIP(vpnIp)
|
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
+
+func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
+	if !n.punchy.GetPunch() {
+		// Punching is disabled
+		return
+	}
+
+	if n.punchy.GetTargetEverything() {
+		hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
+			n.metricsTxPunchy.Inc(1)
+			n.intf.outside.WriteTo([]byte{1}, addr)
+		})
+
+	} else if hostinfo.remote != nil {
+		n.metricsTxPunchy.Inc(1)
+		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
+	}
+}
+
+func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
+	certState := n.intf.pki.GetCertState()
+	if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
+		return
+	}
+
+	n.l.WithField("vpnIp", hostinfo.vpnIp).
+		WithField("reason", "local certificate is not current").
+		Info("Re-handshaking with remote")
+
+	n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
+}
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
@@ -18,6 +19,21 @@ import (
|
|||||||
|
|
||||||
var vpnIp iputil.VpnIp
|
var vpnIp iputil.VpnIp
|
||||||
|
|
||||||
|
func newTestLighthouse() *LightHouse {
|
||||||
|
lh := &LightHouse{
|
||||||
|
l: test.NewLogger(),
|
||||||
|
addrMap: map[iputil.VpnIp]*RemoteList{},
|
||||||
|
queryChan: make(chan iputil.VpnIp, 10),
|
||||||
|
}
|
||||||
|
lighthouses := map[iputil.VpnIp]struct{}{}
|
||||||
|
staticList := map[iputil.VpnIp]struct{}{}
|
||||||
|
|
||||||
|
lh.lighthouses.Store(&lighthouses)
|
||||||
|
lh.staticList.Store(&staticList)
|
||||||
|
|
||||||
|
return lh
|
||||||
|
}
|
||||||
|
|
||||||
func Test_NewConnectionManagerTest(t *testing.T) {
|
func Test_NewConnectionManagerTest(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||||
@@ -27,65 +43,76 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
|
|
||||||
// Very incomplete mock objects
|
// Very incomplete mock objects
|
||||||
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
hostMap := NewHostMap(l, vpncidr, preferredRanges)
|
||||||
cs := &CertState{
|
cs := &CertState{
|
||||||
rawCertificate: []byte{},
|
RawCertificate: []byte{},
|
||||||
privateKey: []byte{},
|
PrivateKey: []byte{},
|
||||||
certificate: &cert.NebulaCertificate{},
|
Certificate: &cert.NebulaCertificate{},
|
||||||
rawCertificateNoKey: []byte{},
|
RawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
|
lh := newTestLighthouse()
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &test.NoopTun{},
|
inside: &test.NoopTun{},
|
||||||
outside: &udp.Conn{},
|
outside: &udp.NoopConn{},
|
||||||
certState: cs,
|
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
pki: &PKI{},
|
||||||
|
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||||
l: l,
|
l: l,
|
||||||
}
|
}
|
||||||
now := time.Now()
|
ifce.pki.cs.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
nc.HandleMonitorTick(now, p, nb, out)
|
|
||||||
// Add an ip we have established a connection w/ to hostmap
|
// Add an ip we have established a connection w/ to hostmap
|
||||||
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
hostinfo := &HostInfo{
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
vpnIp: vpnIp,
|
||||||
certState: cs,
|
localIndexId: 1099,
|
||||||
H: &noise.HandshakeState{},
|
remoteIndexId: 9901,
|
||||||
}
|
}
|
||||||
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
|
myCert: &cert.NebulaCertificate{},
|
||||||
|
H: &noise.HandshakeState{},
|
||||||
|
}
|
||||||
|
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||||
|
|
||||||
// We saw traffic out to vpnIp
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(vpnIp)
|
nc.Out(hostinfo.localIndexId)
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
nc.In(hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
// Move ahead 5s. Nothing should happen
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
next_tick := now.Add(5 * time.Second)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
assert.Contains(t, nc.out, hostinfo.localIndexId)
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// Move ahead 6s. We haven't heard back
|
|
||||||
next_tick = now.Add(6 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// This host should now be up for deletion
|
|
||||||
assert.Contains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
// Move ahead some more
|
|
||||||
next_tick = now.Add(45 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// The host should be evicted
|
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.NotContains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
|
|
||||||
|
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
|
||||||
|
// Do another traffic check tick, this host should be pending deletion now
|
||||||
|
nc.Out(hostinfo.localIndexId)
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
|
||||||
|
// Do a final traffic check tick, the host should now be removed
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_NewConnectionManagerTest2(t *testing.T) {
|
func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||||
@@ -96,67 +123,78 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
|
|
||||||
// Very incomplete mock objects
|
// Very incomplete mock objects
|
||||||
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
hostMap := NewHostMap(l, vpncidr, preferredRanges)
|
||||||
cs := &CertState{
|
cs := &CertState{
|
||||||
rawCertificate: []byte{},
|
RawCertificate: []byte{},
|
||||||
privateKey: []byte{},
|
PrivateKey: []byte{},
|
||||||
certificate: &cert.NebulaCertificate{},
|
Certificate: &cert.NebulaCertificate{},
|
||||||
rawCertificateNoKey: []byte{},
|
RawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
|
lh := newTestLighthouse()
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &test.NoopTun{},
|
inside: &test.NoopTun{},
|
||||||
outside: &udp.Conn{},
|
outside: &udp.NoopConn{},
|
||||||
certState: cs,
|
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
pki: &PKI{},
|
||||||
|
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||||
l: l,
|
l: l,
|
||||||
}
|
}
|
||||||
now := time.Now()
|
ifce.pki.cs.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
nc.HandleMonitorTick(now, p, nb, out)
|
|
||||||
// Add an ip we have established a connection w/ to hostmap
|
// Add an ip we have established a connection w/ to hostmap
|
||||||
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
hostinfo := &HostInfo{
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
vpnIp: vpnIp,
|
||||||
certState: cs,
|
localIndexId: 1099,
|
||||||
H: &noise.HandshakeState{},
|
remoteIndexId: 9901,
|
||||||
}
|
}
|
||||||
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
|
myCert: &cert.NebulaCertificate{},
|
||||||
|
H: &noise.HandshakeState{},
|
||||||
|
}
|
||||||
|
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||||
|
|
||||||
// We saw traffic out to vpnIp
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(vpnIp)
|
nc.Out(hostinfo.localIndexId)
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
nc.In(hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
|
||||||
// Move ahead 5s. Nothing should happen
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
next_tick := now.Add(5 * time.Second)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// Move ahead 6s. We haven't heard back
|
|
||||||
next_tick = now.Add(6 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// This host should now be up for deletion
|
|
||||||
assert.Contains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
// We heard back this time
|
|
||||||
nc.In(vpnIp)
|
|
||||||
// Move ahead some more
|
|
||||||
next_tick = now.Add(45 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// The host should be evicted
|
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
|
|
||||||
|
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
|
||||||
|
// Do another traffic check tick, this host should be pending deletion now
|
||||||
|
nc.Out(hostinfo.localIndexId)
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
|
||||||
|
// We saw traffic, should no longer be pending deletion
|
||||||
|
nc.In(hostinfo.localIndexId)
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we can disconnect the peer.
|
// Check if we can disconnect the peer.
|
||||||
@@ -172,7 +210,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
  _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
  preferredRanges := []*net.IPNet{localrange}
- hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
+ hostMap := NewHostMap(l, vpncidr, preferredRanges)

  // Generate keys for CA and peer's cert.
  pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)

@@ -185,7 +223,8 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
  PublicKey: pubCA,
  },
  }
- caCert.Sign(privCA)
+ assert.NoError(t, caCert.Sign(cert.Curve_CURVE25519, privCA))
  ncp := &cert.NebulaCAPool{
  CAs: cert.NewCAPool().CAs,
  }

@@ -204,52 +243,58 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
  Issuer: "ca",
  },
  }
- peerCert.Sign(privCA)
+ assert.NoError(t, peerCert.Sign(cert.Curve_CURVE25519, privCA))

  cs := &CertState{
- rawCertificate: []byte{},
- privateKey: []byte{},
- certificate: &cert.NebulaCertificate{},
- rawCertificateNoKey: []byte{},
+ RawCertificate: []byte{},
+ PrivateKey: []byte{},
+ Certificate: &cert.NebulaCertificate{},
+ RawCertificateNoKey: []byte{},
  }

- lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
+ lh := newTestLighthouse()
  ifce := &Interface{
  hostMap: hostMap,
  inside: &test.NoopTun{},
- outside: &udp.Conn{},
- certState: cs,
+ outside: &udp.NoopConn{},
  firewall: &Firewall{},
  lightHouse: lh,
- handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
+ handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
  l: l,
- disconnectInvalid: true,
- caPool: ncp,
+ pki: &PKI{},
  }
+ ifce.pki.cs.Store(cs)
+ ifce.pki.caPool.Store(ncp)
+ ifce.disconnectInvalid.Store(true)

  // Create manager
  ctx, cancel := context.WithCancel(context.Background())
  defer cancel()
- nc := newConnectionManager(ctx, l, ifce, 5, 10)
+ punchy := NewPunchyFromConfig(l, config.NewC(l))
+ nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
  ifce.connectionManager = nc

- hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
- hostinfo.ConnectionState = &ConnectionState{
- certState: cs,
- peerCert: &peerCert,
- H: &noise.HandshakeState{},
- }
+ hostinfo := &HostInfo{
+ vpnIp: vpnIp,
+ ConnectionState: &ConnectionState{
+ myCert: &cert.NebulaCertificate{},
+ peerCert: &peerCert,
+ H: &noise.HandshakeState{},
+ },
+ }
+ nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

  // Move ahead 45s.
  // Check if to disconnect with invalid certificate.
  // Should be alive.
  nextTick := now.Add(45 * time.Second)
- destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
- assert.False(t, destroyed)
+ invalid := nc.isInvalidCertificate(nextTick, hostinfo)
+ assert.False(t, invalid)

  // Move ahead 61s.
  // Check if to disconnect with invalid certificate.
  // Should be disconnected.
  nextTick = now.Add(61 * time.Second)
- destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
- assert.True(t, destroyed)
+ invalid = nc.isInvalidCertificate(nextTick, hostinfo)
+ assert.True(t, invalid)
  }
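The test above now stores the certificate state, CA pool, and the disconnect flag through atomic wrappers on the interface rather than assigning plain fields. A minimal sketch of that pattern follows; pkiHolder and certState are stand-in names invented for this sketch, not nebula's real types.

package main

import (
	"fmt"
	"sync/atomic"
)

// certState stands in for nebula's CertState; its single field is an
// assumption made only to keep the sketch self-contained.
type certState struct{ name string }

// pkiHolder sketches the pattern the test relies on: hot-reloadable state is
// kept behind Go 1.19 sync/atomic wrapper types, so readers never take a lock.
type pkiHolder struct {
	cs                atomic.Pointer[certState]
	disconnectInvalid atomic.Bool
}

func main() {
	p := &pkiHolder{}
	p.cs.Store(&certState{name: "current"})
	p.disconnectInvalid.Store(true)

	// Load returns whatever was most recently stored, safely across goroutines.
	fmt.Println(p.cs.Load().name, p.disconnectInvalid.Load())
}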
@@ -9,32 +9,43 @@ import (
  "github.com/flynn/noise"
  "github.com/sirupsen/logrus"
  "github.com/slackhq/nebula/cert"
+ "github.com/slackhq/nebula/noiseutil"
  )

  const ReplayWindow = 1024

  type ConnectionState struct {
  eKey *NebulaCipherState
  dKey *NebulaCipherState
  H *noise.HandshakeState
- certState *CertState
+ myCert *cert.NebulaCertificate
  peerCert *cert.NebulaCertificate
  initiator bool
- atomicMessageCounter uint64
+ messageCounter atomic.Uint64
  window *Bits
- queueLock sync.Mutex
  writeLock sync.Mutex
- ready bool
  }

- func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
- cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
- if f.cipher == "chachapoly" {
- cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
+ func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
+ var dhFunc noise.DHFunc
+ switch certState.Certificate.Details.Curve {
+ case cert.Curve_CURVE25519:
+ dhFunc = noise.DH25519
+ case cert.Curve_P256:
+ dhFunc = noiseutil.DHP256
+ default:
+ l.Errorf("invalid curve: %s", certState.Certificate.Details.Curve)
+ return nil
  }

- curCertState := f.certState
- static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
+ var cs noise.CipherSuite
+ if cipher == "chachapoly" {
+ cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
+ } else {
+ cs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
+ }
+
+ static := noise.DHKey{Private: certState.PrivateKey, Public: certState.PublicKey}

  b := NewBits(ReplayWindow)
  // Clear out bit 0, we never transmit it and we don't want it showing as packet loss

@@ -59,8 +70,7 @@ func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern
  H: hs,
  initiator: initiator,
  window: b,
- ready: false,
- certState: curCertState,
+ myCert: certState.Certificate,
  }

  return ci

@@ -70,7 +80,6 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
  return json.Marshal(m{
  "certificate": cs.peerCert,
  "initiator": cs.initiator,
- "message_counter": atomic.LoadUint64(&cs.atomicMessageCounter),
- "ready": cs.ready,
+ "message_counter": cs.messageCounter.Load(),
  })
  }
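The new NewConnectionState picks the Noise DH function from the certificate's curve and then builds the cipher suite from the configured cipher string. The sketch below shows only that selection step using the stock flynn/noise primitives; nebula's own noiseutil wrappers (DHP256 and its AES-GCM variant) are not reproduced here, so the P-256 branch is represented by an error.

package main

import (
	"fmt"

	"github.com/flynn/noise"
)

// curve mirrors the two certificate curves the diff handles; the names are
// local stand-ins for cert.Curve_CURVE25519 and cert.Curve_P256.
type curve int

const (
	curve25519 curve = iota
	curveP256
)

// dhForCurve sketches the switch at the top of NewConnectionState.
func dhForCurve(c curve) (noise.DHFunc, error) {
	switch c {
	case curve25519:
		return noise.DH25519, nil
	case curveP256:
		// nebula uses its own noiseutil.DHP256 here; not reimplemented in this sketch.
		return nil, fmt.Errorf("P-256 needs nebula's noiseutil.DHP256")
	default:
		return nil, fmt.Errorf("invalid curve: %d", c)
	}
}

// buildCipherSuite sketches the follow-up choice: ChaCha20-Poly1305 when the
// config asks for "chachapoly", AES-GCM otherwise.
func buildCipherSuite(cipher string, dh noise.DHFunc) noise.CipherSuite {
	if cipher == "chachapoly" {
		return noise.NewCipherSuite(dh, noise.CipherChaChaPoly, noise.HashSHA256)
	}
	return noise.NewCipherSuite(dh, noise.CipherAESGCM, noise.HashSHA256)
}

func main() {
	dh, err := dhForCurve(curve25519)
	if err != nil {
		panic(err)
	}
	cs := buildCipherSuite("chachapoly", dh)
	fmt.Printf("built suite of type %T\n", cs)
}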
control.go — 110 changed lines
@@ -5,26 +5,37 @@ import (
  "net"
  "os"
  "os/signal"
- "sync/atomic"
  "syscall"

  "github.com/sirupsen/logrus"
  "github.com/slackhq/nebula/cert"
  "github.com/slackhq/nebula/header"
  "github.com/slackhq/nebula/iputil"
+ "github.com/slackhq/nebula/overlay"
  "github.com/slackhq/nebula/udp"
  )

  // Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
  // core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc

+ type controlEach func(h *HostInfo)
+
+ type controlHostLister interface {
+ QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo
+ ForEachIndex(each controlEach)
+ ForEachVpnIp(each controlEach)
+ GetPreferredRanges() []*net.IPNet
+ }
+
  type Control struct {
  f *Interface
  l *logrus.Logger
+ ctx context.Context
  cancel context.CancelFunc
  sshStart func()
  statsStart func()
  dnsStart func()
+ lighthouseStart func()
  }

  type ControlHostInfo struct {

@@ -32,7 +43,6 @@ type ControlHostInfo struct {
  LocalIndex uint32 `json:"localIndex"`
  RemoteIndex uint32 `json:"remoteIndex"`
  RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
- CachedPackets int `json:"cachedPackets"`
  Cert *cert.NebulaCertificate `json:"cert"`
  MessageCounter uint64 `json:"messageCounter"`
  CurrentRemote *udp.Addr `json:"currentRemote"`

@@ -55,14 +65,21 @@ func (c *Control) Start() {
  if c.dnsStart != nil {
  go c.dnsStart()
  }
+ if c.lighthouseStart != nil {
+ c.lighthouseStart()
+ }

  // Start reading packets.
  c.f.run()
  }

- // Stop signals nebula to shutdown, returns after the shutdown is complete
+ func (c *Control) Context() context.Context {
+ return c.ctx
+ }
+
+ // Stop signals nebula to shutdown and close all tunnels, returns after the shutdown is complete
  func (c *Control) Stop() {
- // Stop the handshakeManager (and other serivces), to prevent new tunnels from
+ // Stop the handshakeManager (and other services), to prevent new tunnels from
  // being created while we're shutting them all down.
  c.cancel()

@@ -75,7 +92,7 @@ func (c *Control) Stop() {

  // ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
  func (c *Control) ShutdownBlock() {
- sigChan := make(chan os.Signal)
+ sigChan := make(chan os.Signal, 1)
  signal.Notify(sigChan, syscall.SIGTERM)
  signal.Notify(sigChan, syscall.SIGINT)

@@ -90,32 +107,41 @@ func (c *Control) RebindUDPServer() {
  _ = c.f.outside.Rebind()

  // Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
- c.f.lightHouse.SendUpdate(c.f)
+ c.f.lightHouse.SendUpdate()

  // Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
  c.f.rebindCount++
  }

- // ListHostmap returns details about the actual or pending (handshaking) hostmap
- func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo {
+ // ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
+ func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
  if pendingMap {
- return listHostMap(c.f.handshakeManager.pendingHostMap)
+ return listHostMapHosts(c.f.handshakeManager)
  } else {
- return listHostMap(c.f.hostMap)
+ return listHostMapHosts(c.f.hostMap)
+ }
+ }
+
+ // ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
+ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
+ if pendingMap {
+ return listHostMapIndexes(c.f.handshakeManager)
+ } else {
+ return listHostMapIndexes(c.f.hostMap)
  }
  }

  // GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
  func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
- var hm *HostMap
+ var hl controlHostLister
  if pending {
- hm = c.f.handshakeManager.pendingHostMap
+ hl = c.f.handshakeManager
  } else {
- hm = c.f.hostMap
+ hl = c.f.hostMap
  }

- h, err := hm.QueryVpnIp(vpnIp)
- if err != nil {
+ h := hl.QueryVpnIp(vpnIp)
+ if h == nil {
  return nil
  }

@@ -125,8 +151,8 @@ func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlH

  // SetRemoteForTunnel forces a tunnel to use a specific remote
  func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
- hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
- if err != nil {
+ hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
+ if hostInfo == nil {
  return nil
  }

@@ -137,8 +163,8 @@ func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *Control

  // CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
  func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
- hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
- if err != nil {
+ hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
+ if hostInfo == nil {
  return false
  }

@@ -190,7 +216,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
  hostInfos := []*HostInfo{}
  // Grab the hostMap lock to access the Hosts map
  c.f.hostMap.Lock()
- for _, relayHost := range c.f.hostMap.Hosts {
+ for _, relayHost := range c.f.hostMap.Indexes {
  if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
  hostInfos = append(hostInfos, relayHost)
  }

@@ -206,6 +232,10 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
  return
  }

+ func (c *Control) Device() overlay.Device {
+ return c.f.inside
+ }
+
  func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {

  chi := ControlHostInfo{

@@ -213,13 +243,12 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
  LocalIndex: h.localIndexId,
  RemoteIndex: h.remoteIndexId,
  RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
- CachedPackets: len(h.packetStore),
  CurrentRelaysToMe: h.relayState.CopyRelayIps(),
  CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
  }

  if h.ConnectionState != nil {
- chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter)
+ chi.MessageCounter = h.ConnectionState.messageCounter.Load()
  }

  if c := h.GetCert(); c != nil {

@@ -233,15 +262,20 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
  return chi
  }

- func listHostMap(hm *HostMap) []ControlHostInfo {
- hm.RLock()
- hosts := make([]ControlHostInfo, len(hm.Hosts))
- i := 0
- for _, v := range hm.Hosts {
- hosts[i] = copyHostInfo(v, hm.preferredRanges)
- i++
- }
- hm.RUnlock()
+ func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
+ hosts := make([]ControlHostInfo, 0)
+ pr := hl.GetPreferredRanges()
+ hl.ForEachVpnIp(func(hostinfo *HostInfo) {
+ hosts = append(hosts, copyHostInfo(hostinfo, pr))
+ })
+ return hosts
+ }
+
+ func listHostMapIndexes(hl controlHostLister) []ControlHostInfo {
+ hosts := make([]ControlHostInfo, 0)
+ pr := hl.GetPreferredRanges()
+ hl.ForEachIndex(func(hostinfo *HostInfo) {
+ hosts = append(hosts, copyHostInfo(hostinfo, pr))
+ })
  return hosts
  }
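The control.go changes above introduce the controlHostLister interface so Control can walk either the real hostmap or the handshake manager's pending entries without knowing which one it has. A toy sketch of that shape follows; hostLister, memMap, hostInfo, and vpnIP are stand-in names for this sketch only, not nebula's exported types.

package main

import (
	"fmt"
	"net"
)

type vpnIP uint32
type hostInfo struct{ vpnIp vpnIP }

// hostLister mirrors the interface added in the diff: lookup plus iteration
// by vpn ip and by local index, and access to the preferred ranges.
type hostLister interface {
	QueryVpnIp(ip vpnIP) *hostInfo
	ForEachVpnIp(each func(*hostInfo))
	ForEachIndex(each func(*hostInfo))
	GetPreferredRanges() []*net.IPNet
}

// memMap is a minimal in-memory implementation used only to show how a
// listHostMapHosts-style helper can iterate any lister.
type memMap struct {
	hosts   map[vpnIP]*hostInfo
	indexes map[uint32]*hostInfo
}

func (m *memMap) QueryVpnIp(ip vpnIP) *hostInfo    { return m.hosts[ip] }
func (m *memMap) GetPreferredRanges() []*net.IPNet { return nil }
func (m *memMap) ForEachVpnIp(each func(*hostInfo)) {
	for _, h := range m.hosts {
		each(h)
	}
}
func (m *memMap) ForEachIndex(each func(*hostInfo)) {
	for _, h := range m.indexes {
		each(h)
	}
}

// listHosts copies out one entry per vpn ip, the same walk the new
// listHostMapHosts helper performs with copyHostInfo.
func listHosts(hl hostLister) []vpnIP {
	out := make([]vpnIP, 0)
	hl.ForEachVpnIp(func(h *hostInfo) { out = append(out, h.vpnIp) })
	return out
}

func main() {
	m := &memMap{
		hosts:   map[vpnIP]*hostInfo{1: {vpnIp: 1}},
		indexes: map[uint32]*hostInfo{10: {vpnIp: 1}},
	}
	fmt.Println(listHosts(m))
}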
@@ -18,7 +18,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
|
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
|
||||||
// To properly ensure we are not exposing core memory to the caller
|
// To properly ensure we are not exposing core memory to the caller
|
||||||
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
|
hm := NewHostMap(l, &net.IPNet{}, make([]*net.IPNet, 0))
|
||||||
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
|
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
|
||||||
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
|
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
|
||||||
ipNet := net.IPNet{
|
ipNet := net.IPNet{
|
||||||
@@ -47,10 +47,10 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
Signature: []byte{1, 2, 1, 2, 1, 3},
|
Signature: []byte{1, 2, 1, 2, 1, 3},
|
||||||
}
|
}
|
||||||
|
|
||||||
remotes := NewRemoteList()
|
remotes := NewRemoteList(nil)
|
||||||
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
|
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
|
||||||
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
|
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
|
||||||
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
|
hm.unlockedAddHostInfo(&HostInfo{
|
||||||
remote: remote1,
|
remote: remote1,
|
||||||
remotes: remotes,
|
remotes: remotes,
|
||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
@@ -64,9 +64,9 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
relayForByIp: map[iputil.VpnIp]*Relay{},
|
relayForByIp: map[iputil.VpnIp]*Relay{},
|
||||||
relayForByIdx: map[uint32]*Relay{},
|
relayForByIdx: map[uint32]*Relay{},
|
||||||
},
|
},
|
||||||
})
|
}, &Interface{})
|
||||||
|
|
||||||
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
|
hm.unlockedAddHostInfo(&HostInfo{
|
||||||
remote: remote1,
|
remote: remote1,
|
||||||
remotes: remotes,
|
remotes: remotes,
|
||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
@@ -80,7 +80,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
relayForByIp: map[iputil.VpnIp]*Relay{},
|
relayForByIp: map[iputil.VpnIp]*Relay{},
|
||||||
relayForByIdx: map[uint32]*Relay{},
|
relayForByIdx: map[uint32]*Relay{},
|
||||||
},
|
},
|
||||||
})
|
}, &Interface{})
|
||||||
|
|
||||||
c := Control{
|
c := Control{
|
||||||
f: &Interface{
|
f: &Interface{
|
||||||
@@ -96,7 +96,6 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
LocalIndex: 201,
|
LocalIndex: 201,
|
||||||
RemoteIndex: 200,
|
RemoteIndex: 200,
|
||||||
RemoteAddrs: []*udp.Addr{remote2, remote1},
|
RemoteAddrs: []*udp.Addr{remote2, remote1},
|
||||||
CachedPackets: 0,
|
|
||||||
Cert: crt.Copy(),
|
Cert: crt.Copy(),
|
||||||
MessageCounter: 0,
|
MessageCounter: 0,
|
||||||
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
|
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
|
||||||
@@ -105,7 +104,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Make sure we don't have any unexpected fields
|
// Make sure we don't have any unexpected fields
|
||||||
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
|
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
|
||||||
test.AssertDeepCopyEqual(t, &expectedInfo, thi)
|
test.AssertDeepCopyEqual(t, &expectedInfo, thi)
|
||||||
|
|
||||||
// Make sure we don't panic if the host info doesn't have a cert yet
|
// Make sure we don't panic if the host info doesn't have a cert yet
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ package nebula
|
|||||||
import (
|
import (
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
|
||||||
"github.com/google/gopacket"
|
"github.com/google/gopacket"
|
||||||
"github.com/google/gopacket/layers"
|
"github.com/google/gopacket/layers"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
@@ -14,12 +16,12 @@ import (
|
|||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// WaitForTypeByIndex will pipe all messages from this control device into the pipeTo control device
|
// WaitForType will pipe all messages from this control device into the pipeTo control device
|
||||||
// returning after a message matching the criteria has been piped
|
// returning after a message matching the criteria has been piped
|
||||||
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
for {
|
for {
|
||||||
p := c.f.outside.Get(true)
|
p := c.f.outside.(*udp.TesterConn).Get(true)
|
||||||
if err := h.Parse(p.Data); err != nil {
|
if err := h.Parse(p.Data); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -35,7 +37,7 @@ func (c *Control) WaitForType(msgType header.MessageType, subType header.Message
|
|||||||
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
for {
|
for {
|
||||||
p := c.f.outside.Get(true)
|
p := c.f.outside.(*udp.TesterConn).Get(true)
|
||||||
if err := h.Parse(p.Data); err != nil {
|
if err := h.Parse(p.Data); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -88,11 +90,11 @@ func (c *Control) GetFromTun(block bool) []byte {
|
|||||||
|
|
||||||
// GetFromUDP will pull a udp packet off the udp side of nebula
|
// GetFromUDP will pull a udp packet off the udp side of nebula
|
||||||
func (c *Control) GetFromUDP(block bool) *udp.Packet {
|
func (c *Control) GetFromUDP(block bool) *udp.Packet {
|
||||||
return c.f.outside.Get(block)
|
return c.f.outside.(*udp.TesterConn).Get(block)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
|
func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
|
||||||
return c.f.outside.TxPackets
|
return c.f.outside.(*udp.TesterConn).TxPackets
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Control) GetTunTxChan() <-chan []byte {
|
func (c *Control) GetTunTxChan() <-chan []byte {
|
||||||
@@ -101,7 +103,7 @@ func (c *Control) GetTunTxChan() <-chan []byte {
|
|||||||
|
|
||||||
// InjectUDPPacket will inject a packet into the udp side of nebula
|
// InjectUDPPacket will inject a packet into the udp side of nebula
|
||||||
func (c *Control) InjectUDPPacket(p *udp.Packet) {
|
func (c *Control) InjectUDPPacket(p *udp.Packet) {
|
||||||
c.f.outside.Send(p)
|
c.f.outside.(*udp.TesterConn).Send(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
|
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
|
||||||
@@ -141,15 +143,27 @@ func (c *Control) GetVpnIp() iputil.VpnIp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *Control) GetUDPAddr() string {
|
func (c *Control) GetUDPAddr() string {
|
||||||
return c.f.outside.Addr.String()
|
return c.f.outside.(*udp.TesterConn).Addr.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
|
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
|
||||||
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
|
hostinfo := c.f.handshakeManager.QueryVpnIp(iputil.Ip2VpnIp(vpnIp))
|
||||||
if !ok {
|
if hostinfo == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
|
c.f.handshakeManager.DeleteHostInfo(hostinfo)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetHostmap() *HostMap {
|
||||||
|
return c.f.hostMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetCert() *cert.NebulaCertificate {
|
||||||
|
return c.f.pki.GetCertState().Certificate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
|
||||||
|
c.f.handshakeManager.StartHandshake(vpnIp, nil)
|
||||||
|
}
|
||||||
|
|||||||
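The control_tester changes above replace direct access to c.f.outside with a type assertion to *udp.TesterConn, since the outside socket is now held as an interface in the Interface struct. The sketch below shows that pattern with stand-in names (conn and testerConn are not nebula's real types).

package main

import "fmt"

// conn stands in for the udp connection interface the production code sees.
type conn interface{ Rebind() error }

// testerConn stands in for the test-only concrete type with extra helpers.
type testerConn struct{ queue []string }

func (t *testerConn) Rebind() error { return nil }
func (t *testerConn) Get() string {
	p := t.queue[0]
	t.queue = t.queue[1:]
	return p
}

func main() {
	var outside conn = &testerConn{queue: []string{"packet"}}

	// Production code only uses the interface; test helpers assert back to
	// the concrete tester type to reach its extra methods, which is what the
	// c.f.outside.(*udp.TesterConn) calls above do.
	fmt.Println(outside.(*testerConn).Get())
}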
dist/arch/nebula.service (vendored) — 6 changed lines
@@ -1,9 +1,11 @@
  [Unit]
- Description=nebula
+ Description=Nebula overlay networking tool
- Wants=basic.target network-online.target
+ Wants=basic.target network-online.target nss-lookup.target time-sync.target
  After=basic.target network.target network-online.target

  [Service]
+ Type=notify
+ NotifyAccess=main
  SyslogIdentifier=nebula
  ExecReload=/bin/kill -HUP $MAINPID
  ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
dist/fedora/nebula.service (vendored) — 7 changed lines
@@ -1,15 +1,16 @@
  [Unit]
  Description=Nebula overlay networking tool
+ Wants=basic.target network-online.target nss-lookup.target time-sync.target
  After=basic.target network.target network-online.target
  Before=sshd.service
- Wants=basic.target network-online.target

  [Service]
+ Type=notify
+ NotifyAccess=main
+ SyslogIdentifier=nebula
  ExecReload=/bin/kill -HUP $MAINPID
  ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
  Restart=always
- SyslogIdentifier=nebula

  [Install]
  WantedBy=multi-user.target
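Both unit files above switch to Type=notify with NotifyAccess=main, which means the daemon is expected to tell systemd when it is ready by writing "READY=1" to the datagram socket named in NOTIFY_SOCKET. The sketch below shows that readiness message in plain Go; it is a minimal best-effort version (abstract-namespace sockets with a leading '@' are not handled) and is not nebula's actual implementation.

package main

import (
	"net"
	"os"
)

// notifyReady sends the sd_notify readiness message if the process was
// started by systemd with Type=notify; otherwise it does nothing.
func notifyReady() error {
	sock := os.Getenv("NOTIFY_SOCKET")
	if sock == "" {
		return nil // not running under Type=notify
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("READY=1"))
	return err
}

func main() {
	_ = notifyReady() // best effort; service startup continues either way
}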
@@ -4,6 +4,7 @@ import (
  "fmt"
  "net"
  "strconv"
+ "strings"
  "sync"

  "github.com/miekg/dns"

@@ -33,11 +34,10 @@ func newDnsRecords(hostMap *HostMap) *dnsRecords {

  func (d *dnsRecords) Query(data string) string {
  d.RLock()
- if r, ok := d.dnsMap[data]; ok {
- d.RUnlock()
+ defer d.RUnlock()
+ if r, ok := d.dnsMap[strings.ToLower(data)]; ok {
  return r
  }
- d.RUnlock()
  return ""
  }

@@ -47,8 +47,8 @@ func (d *dnsRecords) QueryCert(data string) string {
  return ""
  }
  iip := iputil.Ip2VpnIp(ip)
- hostinfo, err := d.hostMap.QueryVpnIp(iip)
- if err != nil {
+ hostinfo := d.hostMap.QueryVpnIp(iip)
+ if hostinfo == nil {
  return ""
  }
  q := hostinfo.GetCert()

@@ -62,8 +62,8 @@ func (d *dnsRecords) QueryCert(data string) string {

  func (d *dnsRecords) Add(host, data string) {
  d.Lock()
- d.dnsMap[host] = data
- d.Unlock()
+ defer d.Unlock()
+ d.dnsMap[strings.ToLower(host)] = data
  }

  func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
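The dns changes above make lookups case-insensitive by lowercasing the key on both Add and Query, and release the read lock with defer instead of unlocking on every return path. A self-contained sketch of the same pattern, using local stand-in names rather than nebula's dnsRecords type:

package main

import (
	"fmt"
	"strings"
	"sync"
)

// records keeps every key lowercased so "Host.Example." and "host.example."
// resolve to the same entry, and uses defer so the lock is always released.
type records struct {
	sync.RWMutex
	m map[string]string
}

func (r *records) Add(host, data string) {
	r.Lock()
	defer r.Unlock()
	r.m[strings.ToLower(host)] = data
}

func (r *records) Query(host string) string {
	r.RLock()
	defer r.RUnlock()
	return r.m[strings.ToLower(host)]
}

func main() {
	r := &records{m: map[string]string{}}
	r.Add("Host.Example.", "10.0.0.1")
	fmt.Println(r.Query("host.example.")) // prints 10.0.0.1
}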
@@ -4,32 +4,59 @@
  package e2e

  import (
+ "fmt"
  "net"
  "testing"
  "time"

+ "github.com/sirupsen/logrus"
  "github.com/slackhq/nebula"
  "github.com/slackhq/nebula/e2e/router"
  "github.com/slackhq/nebula/header"
  "github.com/slackhq/nebula/iputil"
  "github.com/slackhq/nebula/udp"
  "github.com/stretchr/testify/assert"
+ "gopkg.in/yaml.v2"
  )

- func TestGoodHandshake(t *testing.T) {
- ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
- myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
- theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
+ func BenchmarkHotPath(b *testing.B) {
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+ myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

  // Put their info in our lighthouse
- myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
+ myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

+ // Start the servers
+ myControl.Start()
+ theirControl.Start()
+
+ r := router.NewR(b, myControl, theirControl)
+ r.CancelFlowLogs()
+
+ for n := 0; n < b.N; n++ {
+ myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
+ _ = r.RouteForAllUntilTxTun(theirControl)
+ }
+
+ myControl.Stop()
+ theirControl.Stop()
+ }
+
+ func TestGoodHandshake(t *testing.T) {
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+ myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
+
+ // Put their info in our lighthouse
+ myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

  // Start the servers
  myControl.Start()
  theirControl.Start()

  t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
- myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
+ myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))

  t.Log("Have them consume my stage 0 packet. They have a tunnel now")
  theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
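BenchmarkHotPath above follows the standard Go benchmark shape: setup before the loop, the measured work repeated b.N times, teardown afterwards. A generic sketch of that structure, with routeOnePacket as a hypothetical stand-in for pushing one packet through the in-memory router:

package example

import "testing"

// routeOnePacket is a placeholder for the work being measured.
func routeOnePacket() {}

func BenchmarkRoundTrip(b *testing.B) {
	// setup (start servers, seed the lighthouse) would go here
	b.ResetTimer() // exclude setup cost from the measurement
	for n := 0; n < b.N; n++ {
		routeOnePacket()
	}
	// teardown (stop servers) would go here
}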
@@ -50,37 +77,38 @@ func TestGoodHandshake(t *testing.T) {
|
|||||||
myControl.WaitForType(1, 0, theirControl)
|
myControl.WaitForType(1, 0, theirControl)
|
||||||
|
|
||||||
t.Log("Make sure our host infos are correct")
|
t.Log("Make sure our host infos are correct")
|
||||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
|
||||||
|
|
||||||
t.Log("Get that cached packet and make sure it looks right")
|
t.Log("Get that cached packet and make sure it looks right")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("Do a bidirectional tunnel test")
|
t.Log("Do a bidirectional tunnel test")
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
defer r.RenderFlow()
|
defer r.RenderFlow()
|
||||||
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
//TODO: assert hostmaps
|
//TODO: assert hostmaps
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWrongResponderHandshake(t *testing.T) {
|
func TestWrongResponderHandshake(t *testing.T) {
|
||||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
|
||||||
// The IPs here are chosen on purpose:
|
// The IPs here are chosen on purpose:
|
||||||
// The current remote handling will sort by preference, public, and then lexically.
|
// The current remote handling will sort by preference, public, and then lexically.
|
||||||
// So we need them to have a higher address than evil (we could apply a preference though)
|
// So we need them to have a higher address than evil (we could apply a preference though)
|
||||||
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
|
||||||
evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
|
evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Add their real udp addr, which should be tried after evil.
|
// Add their real udp addr, which should be tried after evil.
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
|
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl, evilControl)
|
r := router.NewR(t, myControl, theirControl, evilControl)
|
||||||
@@ -92,7 +120,7 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
evilControl.Start()
|
evilControl.Start()
|
||||||
|
|
||||||
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
err := h.Parse(p.Data)
|
err := h.Parse(p.Data)
|
||||||
@@ -111,34 +139,38 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
|
|
||||||
t.Log("My cached packet should be received by them")
|
t.Log("My cached packet should be received by them")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("Test the tunnel with them")
|
t.Log("Test the tunnel with them")
|
||||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
|
||||||
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
t.Log("Flush all packets from all controllers")
|
t.Log("Flush all packets from all controllers")
|
||||||
r.FlushAll()
|
r.FlushAll()
|
||||||
|
|
||||||
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
||||||
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil")
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil")
|
||||||
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil")
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil")
|
||||||
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
||||||
|
|
||||||
//TODO: assert hostmaps for everyone
|
//TODO: assert hostmaps for everyone
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl, evilControl)
|
||||||
t.Log("Success!")
|
t.Log("Success!")
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_Case1_Stage1Race(t *testing.T) {
|
func TestStage1Race(t *testing.T) {
|
||||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
// This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow
|
||||||
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
// But will eventually collapse down to a single tunnel
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Put their info in our lighthouse and vice versa
|
// Put their info in our lighthouse and vice versa
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr)
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
@@ -149,8 +181,8 @@ func Test_Case1_Stage1Race(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake to start on both me and them")
|
t.Log("Trigger a handshake to start on both me and them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them"))
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
t.Log("Get both stage 1 handshake packets")
|
t.Log("Get both stage 1 handshake packets")
|
||||||
myHsForThem := myControl.GetFromUDP(true)
|
myHsForThem := myControl.GetFromUDP(true)
|
||||||
@@ -159,43 +191,165 @@ func Test_Case1_Stage1Race(t *testing.T) {
|
|||||||
r.Log("Now inject both stage 1 handshake packets")
|
r.Log("Now inject both stage 1 handshake packets")
|
||||||
r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
|
r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
|
||||||
r.InjectUDPPacket(myControl, theirControl, myHsForThem)
|
r.InjectUDPPacket(myControl, theirControl, myHsForThem)
|
||||||
//TODO: they should win, grab their index for me and make sure I use it in the end.
|
|
||||||
|
|
||||||
r.Log("They should not have a stage 2 (won the race) but I should send one")
|
r.Log("Route until they receive a message packet")
|
||||||
r.InjectUDPPacket(myControl, theirControl, myControl.GetFromUDP(true))
|
myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
r.Log("Route for me until I send a message packet to them")
|
r.Log("Their cached packet should be received by me")
|
||||||
r.RouteForAllUntilAfterMsgTypeTo(theirControl, header.Message, header.MessageNone)
|
theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("My cached packet should be received by them")
|
r.Log("Do a bidirectional tunnel test")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
|
||||||
|
|
||||||
t.Log("Route for them until I send a message packet to me")
|
myHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
theirControl.WaitForType(1, 0, myControl)
|
myHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
t.Log("Their cached packet should be received by me")
|
// We should have two tunnels on both sides
|
||||||
theirCachedPacket := myControl.GetFromTun(true)
|
assert.Len(t, myHostmapHosts, 1)
|
||||||
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80)
|
assert.Len(t, theirHostmapHosts, 1)
|
||||||
|
assert.Len(t, myHostmapIndexes, 2)
|
||||||
|
assert.Len(t, theirHostmapIndexes, 2)
|
||||||
|
|
||||||
t.Log("Do a bidirectional tunnel test")
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
|
||||||
|
|
||||||
|
r.Log("Spin until connection manager tears down a tunnel")
|
||||||
|
|
||||||
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
|
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
|
// We should only have a single tunnel now on both sides
|
||||||
|
assert.Len(t, myFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, myFinalHostmapIndexes, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapIndexes, 1)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
//TODO: assert hostmaps
|
}
|
||||||
|
|
||||||
|
func TestUncleanShutdownRaceLoser(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from me to them")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
|
r.Log("Nuke my hostmap")
|
||||||
|
myHostmap := myControl.GetHostmap()
|
||||||
|
myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
|
||||||
|
myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again"))
|
||||||
|
p = r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.Log("Wait for the dead index to go away")
|
||||||
|
start := len(theirControl.GetHostmap().Indexes)
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
if len(theirControl.GetHostmap().Indexes) < start {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUncleanShutdownRaceWinner(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from me to them")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Nuke my hostmap")
|
||||||
|
theirHostmap := theirControl.GetHostmap()
|
||||||
|
theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
|
||||||
|
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again"))
|
||||||
|
p = r.RouteForAllUntilTxTun(myControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.Log("Wait for the dead index to go away")
|
||||||
|
start := len(myControl.GetHostmap().Indexes)
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
if len(myControl.GetHostmap().Indexes) < start {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRelays(t *testing.T) {
|
func TestRelays(t *testing.T) {
|
||||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIp, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
relayControl, relayVpnIp, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(relayVpnIp, relayUdpAddr)
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
myControl.InjectRelays(theirVpnIp, []net.IP{relayVpnIp})
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
@@ -207,11 +361,616 @@ func TestRelays(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake from me to them via the relay")
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80)
|
r.Log("Assert the tunnel works")
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||||
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStage1RaceRelays(t *testing.T) {
|
||||||
|
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between me and relay")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between them and relay")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
r.Log("Wait for a packet from them to me")
|
||||||
|
p := r.RouteForAllUntilTxTun(myControl)
|
||||||
|
_ = p
|
||||||
|
|
||||||
|
r.FlushAll()
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
relayControl.Stop()
|
||||||
|
//
|
||||||
|
////TODO: assert hostmaps
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStage1RaceRelays2(t *testing.T) {
|
||||||
|
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
l := NewTestLogger()
|
||||||
|
|
||||||
|
// Teach me how to get to the relay and that they can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason about who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between me and relay")
|
||||||
|
l.Info("Get a tunnel between me and relay")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between them and relay")
|
||||||
|
l.Info("Get a tunnel between them and relay")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
||||||
|
l.Info("Trigger a handshake from both them and me via relay to them and me")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
|
||||||
|
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
|
||||||
|
|
||||||
|
r.Log("Wait for a packet from them to me")
|
||||||
|
l.Info("Wait for a packet from them to me; myControl")
|
||||||
|
r.RouteForAllUntilTxTun(myControl)
|
||||||
|
l.Info("Wait for a packet from them to me; theirControl")
|
||||||
|
r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
l.Info("Assert the tunnel works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
|
||||||
|
t.Log("Wait until we remove extra tunnels")
|
||||||
|
l.Info("Wait until we remove extra tunnels")
|
||||||
|
l.WithFields(
|
||||||
|
logrus.Fields{
|
||||||
|
"myControl": len(myControl.GetHostmap().Indexes),
|
||||||
|
"theirControl": len(theirControl.GetHostmap().Indexes),
|
||||||
|
"relayControl": len(relayControl.GetHostmap().Indexes),
|
||||||
|
}).Info("Waiting for hostinfos to be removed...")
|
||||||
|
hostInfos := len(myControl.GetHostmap().Indexes) + len(theirControl.GetHostmap().Indexes) + len(relayControl.GetHostmap().Indexes)
|
||||||
|
retries := 60
|
||||||
|
for hostInfos > 6 && retries > 0 {
|
||||||
|
hostInfos = len(myControl.GetHostmap().Indexes) + len(theirControl.GetHostmap().Indexes) + len(relayControl.GetHostmap().Indexes)
|
||||||
|
l.WithFields(
|
||||||
|
logrus.Fields{
|
||||||
|
"myControl": len(myControl.GetHostmap().Indexes),
|
||||||
|
"theirControl": len(theirControl.GetHostmap().Indexes),
|
||||||
|
"relayControl": len(relayControl.GetHostmap().Indexes),
|
||||||
|
}).Info("Waiting for hostinfos to be removed...")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
retries--
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
l.Info("Assert the tunnel works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
relayControl.Stop()
|
||||||
|
|
||||||
|
//
|
||||||
|
////TODO: assert hostmaps
|
||||||
|
}
|
||||||
|
func TestRehandshakingRelays(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
|
// Teach me how to get to the relay and that they can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason about who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
|
||||||
|
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
|
||||||
|
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
|
||||||
|
r.Log("Renew relay certificate and spin until me and them sees it")
|
||||||
|
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
relayConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(myNextPEM),
|
||||||
|
"key": string(myNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(relayConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
relayConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
r.Log("Certificate between my and relay is updated!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
r.Log("Certificate between their and relay is updated!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
// We should have two hostinfos on all sides
|
||||||
|
for len(myControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("myControl hostinfos got cleaned up!")
|
||||||
|
for len(theirControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("theirControl hostinfos got cleaned up!")
|
||||||
|
for len(relayControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("relayControl hostinfos got cleaned up!")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRehandshakingRelaysPrimary(t *testing.T) {
|
||||||
|
// This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 128}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 1}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
|
// Teach me how to get to the relay and that they can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason about who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
|
||||||
|
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
|
||||||
|
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
|
||||||
|
r.Log("Renew relay certificate and spin until me and them sees it")
|
||||||
|
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
relayConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(myNextPEM),
|
||||||
|
"key": string(myNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(relayConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
relayConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
r.Log("Certificate between my and relay is updated!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
r.Log("Certificate between their and relay is updated!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
// We should have two hostinfos on all sides
|
||||||
|
for len(myControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("myControl hostinfos got cleaned up!")
|
||||||
|
for len(theirControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("theirControl hostinfos got cleaned up!")
|
||||||
|
for len(relayControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("relayControl hostinfos got cleaned up!")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRehandshaking(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse and vice versa
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason about who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Stand up a tunnel between me and them")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Renew my certificate and spin until their sees it")
|
||||||
|
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
myConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(myNextPEM),
|
||||||
|
"key": string(myNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(myConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
myConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flip their firewall to only allow the new group, to catch the tunnels reverting incorrectly
|
||||||
|
rc, err = yaml.Marshal(theirConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
var theirNewConfig m
|
||||||
|
assert.NoError(t, yaml.Unmarshal(rc, &theirNewConfig))
|
||||||
|
theirFirewall := theirNewConfig["firewall"].(map[interface{}]interface{})
|
||||||
|
theirFirewall["inbound"] = []m{{
|
||||||
|
"proto": "any",
|
||||||
|
"port": "any",
|
||||||
|
"group": "new group",
|
||||||
|
}}
|
||||||
|
rc, err = yaml.Marshal(theirNewConfig)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
theirConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
r.Log("Spin until there is only 1 tunnel")
|
||||||
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
|
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
|
// Make sure the correct tunnel won
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
|
||||||
|
assert.Contains(t, c.Cert.Details.Groups, "new group")
|
||||||
|
|
||||||
|
// We should only have a single tunnel now on both sides
|
||||||
|
assert.Len(t, myFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, myFinalHostmapIndexes, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapIndexes, 1)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRehandshakingLoser(t *testing.T) {
|
||||||
|
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
|
||||||
|
// should be the one with the new certificate
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse and vice versa
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason about who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Stand up a tunnel between me and them")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
tt1 := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
|
||||||
|
tt2 := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
|
||||||
|
fmt.Println(tt1.LocalIndex, tt2.LocalIndex)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Renew their certificate and spin until mine sees it")
|
||||||
|
_, _, theirNextPrivKey, theirNextPEM := NewTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
theirConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(theirNextPEM),
|
||||||
|
"key": string(theirNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(theirConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
theirConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
|
||||||
|
|
||||||
|
_, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"]
|
||||||
|
if theirNewGroup {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flip my firewall to only allow the new group, to catch the tunnels reverting incorrectly
|
||||||
|
rc, err = yaml.Marshal(myConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
var myNewConfig m
|
||||||
|
assert.NoError(t, yaml.Unmarshal(rc, &myNewConfig))
|
||||||
|
theirFirewall := myNewConfig["firewall"].(map[interface{}]interface{})
|
||||||
|
theirFirewall["inbound"] = []m{{
|
||||||
|
"proto": "any",
|
||||||
|
"port": "any",
|
||||||
|
"group": "their new group",
|
||||||
|
}}
|
||||||
|
rc, err = yaml.Marshal(myNewConfig)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
myConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
r.Log("Spin until there is only 1 tunnel")
|
||||||
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
|
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
|
// Make sure the correct tunnel won
|
||||||
|
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
|
||||||
|
assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group")
|
||||||
|
|
||||||
|
// We should only have a single tunnel now on both sides
|
||||||
|
assert.Len(t, myFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, myFinalHostmapIndexes, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapIndexes, 1)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRaceRegression(t *testing.T) {
|
||||||
|
// This test forces stage 1, stage 2, stage 1 to be received by me from them
|
||||||
|
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
|
||||||
|
// caused a cross-linked hostinfo
|
||||||
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
//them rx stage:1 initiatorIndex=642843150 responderIndex=0
|
||||||
|
//me rx stage:1 initiatorIndex=120607833 responderIndex=0
|
||||||
|
//them rx stage:1 initiatorIndex=642843150 responderIndex=0
|
||||||
|
//me rx stage:2 initiatorIndex=642843150 responderIndex=3701775874
|
||||||
|
//me rx stage:1 initiatorIndex=120607833 responderIndex=0
|
||||||
|
//them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089
|
||||||
|
|
||||||
|
t.Log("Start both handshakes")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
t.Log("Get both stage 1")
|
||||||
|
myStage1ForThem := myControl.GetFromUDP(true)
|
||||||
|
theirStage1ForMe := theirControl.GetFromUDP(true)
|
||||||
|
|
||||||
|
t.Log("Inject them in a special way")
|
||||||
|
theirControl.InjectUDPPacket(myStage1ForThem)
|
||||||
|
myControl.InjectUDPPacket(theirStage1ForMe)
|
||||||
|
theirControl.InjectUDPPacket(myStage1ForThem)
|
||||||
|
|
||||||
|
//TODO: ensure stage 2
|
||||||
|
t.Log("Get both stage 2")
|
||||||
|
myStage2ForThem := myControl.GetFromUDP(true)
|
||||||
|
theirStage2ForMe := theirControl.GetFromUDP(true)
|
||||||
|
|
||||||
|
t.Log("Inject them in a special way again")
|
||||||
|
myControl.InjectUDPPacket(theirStage2ForMe)
|
||||||
|
myControl.InjectUDPPacket(theirStage1ForMe)
|
||||||
|
theirControl.InjectUDPPacket(myStage2ForThem)
|
||||||
|
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
t.Log("Flush the packets")
|
||||||
|
r.RouteForAllUntilTxTun(myControl)
|
||||||
|
r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
t.Log("Make sure the tunnel still works")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: test
|
||||||
|
// Race winner renews and handshakes
|
||||||
|
// Race loser renews and handshakes
|
||||||
|
// Does race winner repin the cert to old?
|
||||||
//TODO: add a test with many lies
|
||||||
|
e2e/helpers.go (new file, 118 lines)
@@ -0,0 +1,118 @@
|
|||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"golang.org/x/crypto/curve25519"
|
||||||
|
"golang.org/x/crypto/ed25519"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewTestCaCert will generate a CA cert
|
||||||
|
func NewTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
||||||
|
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||||
|
if before.IsZero() {
|
||||||
|
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
}
|
||||||
|
if after.IsZero() {
|
||||||
|
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
nc := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "test ca",
|
||||||
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
|
PublicKey: pub,
|
||||||
|
IsCA: true,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ips) > 0 {
|
||||||
|
nc.Details.Ips = ips
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(subnets) > 0 {
|
||||||
|
nc.Details.Subnets = subnets
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(groups) > 0 {
|
||||||
|
nc.Details.Groups = groups
|
||||||
|
}
|
||||||
|
|
||||||
|
err = nc.Sign(cert.Curve_CURVE25519, priv)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pem, err := nc.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nc, pub, priv, pem
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTestCert will generate a signed certificate with the provided details.
|
||||||
|
// Expiry times are defaulted if you do not pass them in
|
||||||
|
func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
||||||
|
issuer, err := ca.Sha256Sum()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if before.IsZero() {
|
||||||
|
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
if after.IsZero() {
|
||||||
|
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub, rawPriv := x25519Keypair()
|
||||||
|
|
||||||
|
nc := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: name,
|
||||||
|
Ips: []*net.IPNet{ip},
|
||||||
|
Subnets: subnets,
|
||||||
|
Groups: groups,
|
||||||
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
|
PublicKey: pub,
|
||||||
|
IsCA: false,
|
||||||
|
Issuer: issuer,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = nc.Sign(ca.Details.Curve, key)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pem, err := nc.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
|
||||||
|
}
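For orientation only, not part of the diff: a minimal sketch (assuming it runs inside a test in this e2e package) of how the two helpers above are combined, mirroring the calls made by the relay tests earlier in this changeset.

// Hypothetical usage: mint a throwaway CA, then a host certificate signed by it.
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
hostNet := &net.IPNet{IP: net.IP{10, 0, 0, 1}, Mask: net.CIDRMask(24, 32)}
_, _, hostKey, hostPEM := NewTestCert(ca, caKey, "host", time.Now(), time.Now().Add(5*time.Minute), hostNet, nil, []string{"test group"})
_, _ = hostKey, hostPEM // PEM-encoded key and certificate, ready for a pki config block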
|
||||||
|
|
||||||
|
func x25519Keypair() ([]byte, []byte) {
|
||||||
|
privkey := make([]byte, 32)
|
||||||
|
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubkey, privkey
|
||||||
|
}
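Worth noting, with two illustrative lines that are not in the diff: the private key returned above is a raw 32-byte X25519 scalar, so the matching public key can always be re-derived with the same curve25519 call the helper itself uses.

pub, priv := x25519Keypair()
rederived, _ := curve25519.X25519(priv, curve25519.Basepoint)
_ = pub // pub and rederived hold the same 32 public-key bytes
_ = rederived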
|
||||||
@@ -4,18 +4,16 @@
|
|||||||
package e2e
|
package e2e
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"dario.cat/mergo"
|
||||||
"github.com/google/gopacket"
|
"github.com/google/gopacket"
|
||||||
"github.com/google/gopacket/layers"
|
"github.com/google/gopacket/layers"
|
||||||
"github.com/imdario/mergo"
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula"
|
"github.com/slackhq/nebula"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
@@ -23,15 +21,13 @@ import (
|
|||||||
"github.com/slackhq/nebula/e2e/router"
|
"github.com/slackhq/nebula/e2e/router"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"golang.org/x/crypto/curve25519"
|
|
||||||
"golang.org/x/crypto/ed25519"
|
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
type m map[string]interface{}
|
type m map[string]interface{}
|
||||||
|
|
||||||
// newSimpleServer creates a nebula instance with many assumptions
|
// newSimpleServer creates a nebula instance with many assumptions
|
||||||
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, net.IP, *net.UDPAddr) {
|
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
|
||||||
l := NewTestLogger()
|
l := NewTestLogger()
|
||||||
|
|
||||||
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
|
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
|
||||||
@@ -41,7 +37,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
|
|||||||
IP: udpIp,
|
IP: udpIp,
|
||||||
Port: 4242,
|
Port: 4242,
|
||||||
}
|
}
|
||||||
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
|
_, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
|
||||||
|
|
||||||
caB, err := caCrt.MarshalToPEM()
|
caB, err := caCrt.MarshalToPEM()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -78,6 +74,10 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
|
|||||||
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
|
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
|
||||||
"level": l.Level.String(),
|
"level": l.Level.String(),
|
||||||
},
|
},
|
||||||
|
"timers": m{
|
||||||
|
"pending_deletion_interval": 2,
|
||||||
|
"connection_alive_interval": 2,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if overrides != nil {
|
if overrides != nil {
|
||||||
@@ -102,113 +102,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return control, vpnIpNet.IP, &udpAddr
|
return control, vpnIpNet, &udpAddr, c
|
||||||
}
|
|
||||||
|
|
||||||
// newTestCaCert will generate a CA cert
|
|
||||||
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
|
||||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
|
||||||
if before.IsZero() {
|
|
||||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
|
||||||
}
|
|
||||||
if after.IsZero() {
|
|
||||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
nc := &cert.NebulaCertificate{
|
|
||||||
Details: cert.NebulaCertificateDetails{
|
|
||||||
Name: "test ca",
|
|
||||||
NotBefore: time.Unix(before.Unix(), 0),
|
|
||||||
NotAfter: time.Unix(after.Unix(), 0),
|
|
||||||
PublicKey: pub,
|
|
||||||
IsCA: true,
|
|
||||||
InvertedGroups: make(map[string]struct{}),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ips) > 0 {
|
|
||||||
nc.Details.Ips = ips
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(subnets) > 0 {
|
|
||||||
nc.Details.Subnets = subnets
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(groups) > 0 {
|
|
||||||
nc.Details.Groups = groups
|
|
||||||
}
|
|
||||||
|
|
||||||
err = nc.Sign(priv)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
pem, err := nc.MarshalToPEM()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nc, pub, priv, pem
|
|
||||||
}
|
|
||||||
|
|
||||||
// newTestCert will generate a signed certificate with the provided details.
|
|
||||||
// Expiry times are defaulted if you do not pass them in
|
|
||||||
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
|
||||||
issuer, err := ca.Sha256Sum()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if before.IsZero() {
|
|
||||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
if after.IsZero() {
|
|
||||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub, rawPriv := x25519Keypair()
|
|
||||||
|
|
||||||
nc := &cert.NebulaCertificate{
|
|
||||||
Details: cert.NebulaCertificateDetails{
|
|
||||||
Name: name,
|
|
||||||
Ips: []*net.IPNet{ip},
|
|
||||||
Subnets: subnets,
|
|
||||||
Groups: groups,
|
|
||||||
NotBefore: time.Unix(before.Unix(), 0),
|
|
||||||
NotAfter: time.Unix(after.Unix(), 0),
|
|
||||||
PublicKey: pub,
|
|
||||||
IsCA: false,
|
|
||||||
Issuer: issuer,
|
|
||||||
InvertedGroups: make(map[string]struct{}),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
err = nc.Sign(key)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
pem, err := nc.MarshalToPEM()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
|
|
||||||
}
|
|
||||||
|
|
||||||
func x25519Keypair() ([]byte, []byte) {
|
|
||||||
privkey := make([]byte, 32)
|
|
||||||
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return pubkey, privkey
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type doneCb func()
|
type doneCb func()
|
||||||
@@ -232,12 +126,12 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
|
|||||||
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
|
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
|
||||||
// Send a packet from them to me
|
// Send a packet from them to me
|
||||||
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
|
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
|
||||||
bPacket := r.RouteUntilTxTun(controlB, controlA)
|
bPacket := r.RouteForAllUntilTxTun(controlA)
|
||||||
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
|
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
|
||||||
|
|
||||||
// And once more from me to them
|
// And once more from me to them
|
||||||
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
|
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
|
||||||
aPacket := r.RouteUntilTxTun(controlA, controlB)
|
aPacket := r.RouteForAllUntilTxTun(controlB)
|
||||||
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
|
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -304,7 +198,8 @@ func NewTestLogger() *logrus.Logger {
|
|||||||
|
|
||||||
v := os.Getenv("TEST_LOGS")
|
v := os.Getenv("TEST_LOGS")
|
||||||
if v == "" {
|
if v == "" {
|
||||||
l.SetOutput(ioutil.Discard)
|
l.SetOutput(io.Discard)
|
||||||
|
l.SetLevel(logrus.PanicLevel)
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
|
e2e/router/hostmap.go (new file, 145 lines)
@@ -0,0 +1,145 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package router
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
type edge struct {
|
||||||
|
from string
|
||||||
|
to string
|
||||||
|
dual bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func renderHostmaps(controls ...*nebula.Control) string {
|
||||||
|
var lines []*edge
|
||||||
|
r := "graph TB\n"
|
||||||
|
for _, c := range controls {
|
||||||
|
sr, se := renderHostmap(c)
|
||||||
|
r += sr
|
||||||
|
for _, e := range se {
|
||||||
|
add := true
|
||||||
|
|
||||||
|
// Collapse duplicate edges into a bi-directionally connected edge
|
||||||
|
for _, ge := range lines {
|
||||||
|
if e.to == ge.from && e.from == ge.to {
|
||||||
|
add = false
|
||||||
|
ge.dual = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if add {
|
||||||
|
lines = append(lines, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
if line.dual {
|
||||||
|
r += fmt.Sprintf("\t%v <--> %v\n", line.from, line.to)
|
||||||
|
} else {
|
||||||
|
r += fmt.Sprintf("\t%v --> %v\n", line.from, line.to)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func renderHostmap(c *nebula.Control) (string, []*edge) {
|
||||||
|
var lines []string
|
||||||
|
var globalLines []*edge
|
||||||
|
|
||||||
|
clusterName := strings.Trim(c.GetCert().Details.Name, " ")
|
||||||
|
clusterVpnIp := c.GetCert().Details.Ips[0].IP
|
||||||
|
r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp)
|
||||||
|
|
||||||
|
hm := c.GetHostmap()
|
||||||
|
hm.RLock()
|
||||||
|
defer hm.RUnlock()
|
||||||
|
|
||||||
|
// Draw the vpn to index nodes
|
||||||
|
r += fmt.Sprintf("\t\tsubgraph %s.hosts[\"Hosts (vpn ip to index)\"]\n", clusterName)
|
||||||
|
hosts := sortedHosts(hm.Hosts)
|
||||||
|
for _, vpnIp := range hosts {
|
||||||
|
hi := hm.Hosts[vpnIp]
|
||||||
|
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, vpnIp, vpnIp)
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, hi.GetLocalIndex()))
|
||||||
|
|
||||||
|
rs := hi.GetRelayState()
|
||||||
|
for _, relayIp := range rs.CopyRelayIps() {
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, relayIp := range rs.CopyRelayForIdxs() {
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r += "\t\tend\n"
|
||||||
|
|
||||||
|
// Draw the relay hostinfos
|
||||||
|
if len(hm.Relays) > 0 {
|
||||||
|
r += fmt.Sprintf("\t\tsubgraph %s.relays[\"Relays (relay index to hostinfo)\"]\n", clusterName)
|
||||||
|
for relayIndex, hi := range hm.Relays {
|
||||||
|
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, relayIndex, relayIndex)
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, relayIndex, clusterName, hi.GetLocalIndex()))
|
||||||
|
}
|
||||||
|
r += "\t\tend\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Draw the local index to relay or remote index nodes
|
||||||
|
r += fmt.Sprintf("\t\tsubgraph indexes.%s[\"Indexes (index to hostinfo)\"]\n", clusterName)
|
||||||
|
indexes := sortedIndexes(hm.Indexes)
|
||||||
|
for _, idx := range indexes {
|
||||||
|
hi, ok := hm.Indexes[idx]
|
||||||
|
if ok {
|
||||||
|
r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
|
||||||
|
remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
|
||||||
|
globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
|
||||||
|
_ = hi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r += "\t\tend\n"
|
||||||
|
|
||||||
|
// Add the edges inside this host
|
||||||
|
for _, line := range lines {
|
||||||
|
r += fmt.Sprintf("\t\t%v\n", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
r += "\tend\n"
|
||||||
|
return r, globalLines
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
|
||||||
|
keys := make([]iputil.VpnIp, 0, len(hosts))
|
||||||
|
for key := range hosts {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(keys, func(i, j int) bool {
|
||||||
|
return keys[i] > keys[j]
|
||||||
|
})
|
||||||
|
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortedIndexes(indexes map[uint32]*nebula.HostInfo) []uint32 {
|
||||||
|
keys := make([]uint32, 0, len(indexes))
|
||||||
|
for key := range indexes {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(keys, func(i, j int) bool {
|
||||||
|
return keys[i] > keys[j]
|
||||||
|
})
|
||||||
|
|
||||||
|
return keys
|
||||||
|
}
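As a sketch of how this file is consumed (the control variables are assumed, not shown here): the exported RenderHostmaps used by the tests and the router's periodic renders both funnel into renderHostmaps to produce one Mermaid graph covering every node.

// Illustrative only: build the combined Mermaid markup for a set of controls.
graph := renderHostmaps(myControl, relayControl, theirControl)
fmt.Println(graph) // "graph TB" followed by one subgraph per node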
|
||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -22,6 +23,7 @@ import (
|
|||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
)
|
)
|
||||||
|
|
||||||
type R struct {
|
type R struct {
|
||||||
@@ -40,14 +42,37 @@ type R struct {
|
|||||||
// A map of vpn ip to the nebula control it belongs to
|
// A map of vpn ip to the nebula control it belongs to
|
||||||
vpnControls map[iputil.VpnIp]*nebula.Control
|
vpnControls map[iputil.VpnIp]*nebula.Control
|
||||||
|
|
||||||
flow []flowEntry
|
ignoreFlows []ignoreFlow
|
||||||
|
flow []flowEntry
|
||||||
|
|
||||||
|
// A set of additional mermaid graphs to draw in the flow log markdown file
|
||||||
|
// Currently consisting only of hostmap renders
|
||||||
|
additionalGraphs []mermaidGraph
|
||||||
|
|
||||||
// All interactions are locked to help serialize behavior
|
// All interactions are locked to help serialize behavior
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
|
|
||||||
fn string
|
fn string
|
||||||
cancelRender context.CancelFunc
|
cancelRender context.CancelFunc
|
||||||
t *testing.T
|
t testing.TB
|
||||||
|
}
|
||||||
|
|
||||||
|
type ignoreFlow struct {
|
||||||
|
tun NullBool
|
||||||
|
messageType header.MessageType
|
||||||
|
subType header.MessageSubType
|
||||||
|
//from
|
||||||
|
//to
|
||||||
|
}
|
||||||
|
|
||||||
|
type mermaidGraph struct {
|
||||||
|
title string
|
||||||
|
content string
|
||||||
|
}
|
||||||
|
|
||||||
|
type NullBool struct {
|
||||||
|
HasValue bool
|
||||||
|
IsTrue bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type flowEntry struct {
|
type flowEntry struct {
|
||||||
@@ -63,6 +88,12 @@ type packet struct {
|
|||||||
rx bool // the packet was received by a udp device
|
rx bool // the packet was received by a udp device
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *packet) WasReceived() {
|
||||||
|
if p != nil {
|
||||||
|
p.rx = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type ExitType int
|
type ExitType int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -79,7 +110,7 @@ type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType
|
|||||||
// NewR creates a new router to pass packets in a controlled fashion between the provided controllers.
|
// NewR creates a new router to pass packets in a controlled fashion between the provided controllers.
|
||||||
// The packet flow will be recorded in a file within the mermaid directory under the same name as the test.
|
// The packet flow will be recorded in a file within the mermaid directory under the same name as the test.
|
||||||
// Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made
|
// Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made
|
||||||
func NewR(t *testing.T, controls ...*nebula.Control) *R {
|
func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
if err := os.MkdirAll("mermaid", 0755); err != nil {
|
if err := os.MkdirAll("mermaid", 0755); err != nil {
|
||||||
@@ -91,6 +122,8 @@ func NewR(t *testing.T, controls ...*nebula.Control) *R {
|
|||||||
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
|
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
|
||||||
inNat: make(map[string]*nebula.Control),
|
inNat: make(map[string]*nebula.Control),
|
||||||
outNat: make(map[string]net.UDPAddr),
|
outNat: make(map[string]net.UDPAddr),
|
||||||
|
flow: []flowEntry{},
|
||||||
|
ignoreFlows: []ignoreFlow{},
|
||||||
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
|
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
|
||||||
t: t,
|
t: t,
|
||||||
cancelRender: cancel,
|
cancelRender: cancel,
|
||||||
@@ -119,6 +152,7 @@ func NewR(t *testing.T, controls ...*nebula.Control) *R {
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
case <-clockSource.C:
|
case <-clockSource.C:
|
||||||
|
r.renderHostmaps("clock tick")
|
||||||
r.renderFlow()
|
r.renderFlow()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -148,14 +182,24 @@ func (r *R) RenderFlow() {
|
|||||||
r.renderFlow()
|
r.renderFlow()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CancelFlowLogs stops flow logs from being tracked and destroys any logs already collected
|
||||||
|
func (r *R) CancelFlowLogs() {
|
||||||
|
r.cancelRender()
|
||||||
|
r.flow = nil
|
||||||
|
}
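Since NewR accepts a testing.TB (see the signature change above), a benchmark could reuse the router and call CancelFlowLogs up front to skip the per-packet recording; a hedged sketch, with b, myControl and theirControl assumed from the surrounding benchmark.

// Hypothetical benchmark setup: build the router, then drop flow logging overhead.
r := router.NewR(b, myControl, theirControl)
r.CancelFlowLogs() // no mermaid flow file will be written for this run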
|
||||||
|
|
||||||
func (r *R) renderFlow() {
|
func (r *R) renderFlow() {
|
||||||
|
if r.flow == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
|
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var participants = map[string]struct{}{}
|
var participants = map[string]struct{}{}
|
||||||
var participansVals []string
|
var participantsVals []string
|
||||||
|
|
||||||
fmt.Fprintln(f, "```mermaid")
|
fmt.Fprintln(f, "```mermaid")
|
||||||
fmt.Fprintln(f, "sequenceDiagram")
|
fmt.Fprintln(f, "sequenceDiagram")
|
||||||
@@ -171,19 +215,24 @@ func (r *R) renderFlow() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
participants[addr] = struct{}{}
|
participants[addr] = struct{}{}
|
||||||
sanAddr := strings.Replace(addr, ":", "#58;", 1)
|
sanAddr := strings.Replace(addr, ":", "-", 1)
|
||||||
participansVals = append(participansVals, sanAddr)
|
participantsVals = append(participantsVals, sanAddr)
|
||||||
fmt.Fprintf(
|
fmt.Fprintf(
|
||||||
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
|
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
|
||||||
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
|
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(participantsVals) > 2 {
|
||||||
|
// Get the first and last participantVals for notes
|
||||||
|
participantsVals = []string{participantsVals[0], participantsVals[len(participantsVals)-1]}
|
||||||
|
}
|
||||||
|
|
||||||
// Print packets
|
// Print packets
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
for _, e := range r.flow {
|
for _, e := range r.flow {
|
||||||
if e.packet == nil {
|
if e.packet == nil {
|
||||||
fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participansVals, ", "), e.note)
|
//fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -202,15 +251,77 @@ func (r *R) renderFlow() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(f,
|
fmt.Fprintf(f,
|
||||||
" %s%s%s: %s(%s), counter: %v\n",
|
" %s%s%s: %s(%s), index %v, counter: %v\n",
|
||||||
strings.Replace(p.from.GetUDPAddr(), ":", "#58;", 1),
|
strings.Replace(p.from.GetUDPAddr(), ":", "-", 1),
|
||||||
line,
|
line,
|
||||||
strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
|
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
|
||||||
h.TypeName(), h.SubTypeName(), h.MessageCounter,
|
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fmt.Fprintln(f, "```")
|
fmt.Fprintln(f, "```")
|
||||||
|
|
||||||
|
for _, g := range r.additionalGraphs {
|
||||||
|
fmt.Fprintf(f, "## %s\n", g.title)
|
||||||
|
fmt.Fprintln(f, "```mermaid")
|
||||||
|
fmt.Fprintln(f, g.content)
|
||||||
|
fmt.Fprintln(f, "```")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreFlow tells the router to stop recording future flows that match the provided criteria.
|
||||||
|
// messageType and subType will target nebula underlay packets while tun will target nebula overlay packets
|
||||||
|
// NOTE: This is a very broad system; if you set tun to true then no more tun traffic will be rendered
|
||||||
|
func (r *R) IgnoreFlow(messageType header.MessageType, subType header.MessageSubType, tun NullBool) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
r.ignoreFlows = append(r.ignoreFlows, ignoreFlow{
|
||||||
|
tun,
|
||||||
|
messageType,
|
||||||
|
subType,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
|
||||||
|
s := renderHostmaps(controls...)
|
||||||
|
if len(r.additionalGraphs) > 0 {
|
||||||
|
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
|
||||||
|
if lastGraph.content == s && lastGraph.title == title {
|
||||||
|
// Ignore this rendering if it matches the last rendering added
|
||||||
|
// This is useful if you want to track rendering changes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
|
||||||
|
title: title,
|
||||||
|
content: s,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *R) renderHostmaps(title string) {
|
||||||
|
c := maps.Values(r.controls)
|
||||||
|
sort.SliceStable(c, func(i, j int) bool {
|
||||||
|
return c[i].GetVpnIp() > c[j].GetVpnIp()
|
||||||
|
})
|
||||||
|
|
||||||
|
s := renderHostmaps(c...)
|
||||||
|
if len(r.additionalGraphs) > 0 {
|
||||||
|
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
|
||||||
|
if lastGraph.content == s {
|
||||||
|
// Ignore this rendering if it matches the last rendering added
|
||||||
|
// This is useful if you want to track rendering changes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
|
||||||
|
title: title,
|
||||||
|
content: s,
|
||||||
|
})
|
||||||
}
|
}
// InjectFlow can be used to record packet flow if the test is handling the routing on its own.
@@ -222,6 +333,10 @@ func (r *R) InjectFlow(from, to *nebula.Control, p *udp.Packet) {
 }

 func (r *R) Log(arg ...any) {
+	if r.flow == nil {
+		return
+	}
+
 	r.Lock()
 	r.flow = append(r.flow, flowEntry{note: fmt.Sprint(arg...)})
 	r.t.Log(arg...)
@@ -229,6 +344,10 @@ func (r *R) Log(arg ...any) {
 }

 func (r *R) Logf(format string, arg ...any) {
+	if r.flow == nil {
+		return
+	}
+
 	r.Lock()
 	r.flow = append(r.flow, flowEntry{note: fmt.Sprintf(format, arg...)})
 	r.t.Logf(format, arg...)
@@ -236,14 +355,40 @@ func (r *R) Logf(format string, arg ...any) {
 }

 // unlockedInjectFlow is used by the router to record a packet has been transmitted, the packet is returned and
-// should be marked as received AFTER it has been placed on the receivers channel
+// should be marked as received AFTER it has been placed on the receivers channel.
+// If flow logs have been disabled this function will return nil
 func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool) *packet {
+	if r.flow == nil {
+		return nil
+	}
+
+	r.renderHostmaps(fmt.Sprintf("Packet %v", len(r.flow)))
+
+	if len(r.ignoreFlows) > 0 {
+		var h header.H
+		err := h.Parse(p.Data)
+		if err != nil {
+			panic(err)
+		}
+
+		for _, i := range r.ignoreFlows {
+			if !tun {
+				if i.messageType == h.Type && i.subType == h.Subtype {
+					return nil
+				}
+			} else if i.tun.HasValue && i.tun.IsTrue {
+				return nil
+			}
+		}
+	}
+
 	fp := &packet{
 		from:   from,
 		to:     to,
 		packet: p.Copy(),
 		tun:    tun,
 	}

 	r.flow = append(r.flow, flowEntry{packet: fp})
 	return fp
 }
@@ -285,7 +430,7 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []
 			}
 			fp := r.unlockedInjectFlow(sender, c, p, false)
 			c.InjectUDPPacket(p)
-			fp.rx = true
+			fp.WasReceived()
 			r.Unlock()
 		}
 	}
@@ -344,7 +489,7 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
 			}
 			fp := r.unlockedInjectFlow(cm[x], c, p, false)
 			c.InjectUDPPacket(p)
-			fp.rx = true
+			fp.WasReceived()
 		}
 		r.Unlock()
 	}
@@ -381,14 +526,14 @@ func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
 		case RouteAndExit:
 			fp := r.unlockedInjectFlow(sender, receiver, p, false)
 			receiver.InjectUDPPacket(p)
-			fp.rx = true
+			fp.WasReceived()
 			r.Unlock()
 			return

 		case KeepRouting:
 			fp := r.unlockedInjectFlow(sender, receiver, p, false)
 			receiver.InjectUDPPacket(p)
-			fp.rx = true
+			fp.WasReceived()

 		default:
 			panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
@@ -439,7 +584,7 @@ func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet

 	fp := r.unlockedInjectFlow(sender, receiver, packet, false)
 	receiver.InjectUDPPacket(packet)
-	fp.rx = true
+	fp.WasReceived()
 }

 // RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
@@ -503,14 +648,14 @@ func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
 		case RouteAndExit:
 			fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
 			receiver.InjectUDPPacket(p)
-			fp.rx = true
+			fp.WasReceived()
 			r.Unlock()
 			return

 		case KeepRouting:
 			fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
 			receiver.InjectUDPPacket(p)
-			fp.rx = true
+			fp.WasReceived()

 		default:
 			panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
@@ -613,8 +758,8 @@ func (r *R) formatUdpPacket(p *packet) string {
 	data := packet.ApplicationLayer()
 	return fmt.Sprintf(
 		" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
-		strings.Replace(from, ":", "#58;", 1),
-		strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
+		strings.Replace(from, ":", "-", 1),
+		strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
 		udp.SrcPort,
 		udp.DstPort,
 		string(data.Payload()),
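For orientation, a rough sketch of how these routing helpers are typically driven end to end; the setup names used here (the two *nebula.Control values, RenderFlow, InjectTunUDPPacket) are assumptions about the surrounding e2e harness rather than part of this diff:

	// Hypothetical test fragment: myControl/theirControl are built the way the
	// e2e suite builds them from freshly signed certificates.
	r := router.NewR(t, myControl, theirControl)
	defer r.RenderFlow()

	myControl.Start()
	theirControl.Start()

	// Inject one overlay packet on "my" side and shuttle underlay packets until
	// "their" side would have written something to its tun device.
	myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("hi"))
	out := r.RouteForAllUntilTxTun(theirControl)
	r.Logf("tun packet on the far side: %d bytes", len(out))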
@@ -11,7 +11,7 @@ pki:
   #blocklist:
   #  - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
   # disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
-  #disconnect_invalid: false
+  #disconnect_invalid: true

 # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
 # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -21,6 +21,19 @@ pki:
 static_host_map:
   "192.168.100.1": ["100.64.22.11:4242"]

+# The static_map config stanza can be used to configure how the static_host_map behaves.
+#static_map:
+  # cadence determines how frequently DNS is re-queried for updated IP addresses when a static_host_map entry contains
+  # a DNS name.
+  #cadence: 30s
+
+  # network determines the type of IP addresses to ask the DNS server for. The default is "ip4" because nodes typically
+  # do not know their public IPv4 address. Connecting to the Lighthouse via IPv4 allows the Lighthouse to detect the
+  # public address. Other valid options are "ip6" and "ip" (returns both.)
+  #network: ip4
+
+  # lookup_timeout is the DNS query timeout.
+  #lookup_timeout: 250ms
+
 lighthouse:
   # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
@@ -47,8 +60,9 @@ lighthouse:
   # allowed. You can provide CIDRs here with `true` to allow and `false` to
   # deny. The most specific CIDR rule applies to each remote. If all rules are
   # "allow", the default will be "deny", and vice-versa. If both "allow" and
-  # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
-  # default.
+  # "deny" IPv4 rules are present, then you MUST set a rule for "0.0.0.0/0" as
+  # the default. Similarly if both "allow" and "deny" IPv6 rules are present,
+  # then you MUST set a rule for "::/0" as the default.
   #remote_allow_list:
     # Example to block IPs from this subnet from being used for remote IPs.
     #"172.16.0.0/12": false
@@ -58,7 +72,7 @@ lighthouse:
     #"10.0.0.0/8": false
     #"10.42.42.0/24": true

-  # EXPERIMENTAL: This option my change or disappear in the future.
+  # EXPERIMENTAL: This option may change or disappear in the future.
   # Optionally allows the definition of remote_allow_list blocks
   # specific to an inside VPN IP CIDR.
   #remote_allow_ranges:
@@ -90,10 +104,23 @@ lighthouse:
     #- "1.1.1.1:4242"
     #- "1.2.3.4:0" # port will be replaced with the real listening port

+  # EXPERIMENTAL: This option may change or disappear in the future.
+  # This setting allows us to "guess" what the remote might be for a host
+  # while we wait for the lighthouse response.
+  #calculated_remotes:
+    # For any Nebula IPs in 10.0.10.0/24, this will apply the mask and add
+    # the calculated IP as an initial remote (while we wait for the response
+    # from the lighthouse). Both CIDRs must have the same mask size.
+    # For example, Nebula IP 10.0.10.123 will have a calculated remote of
+    # 192.168.1.123
+    #10.0.10.0/24:
+      #- mask: 192.168.1.0/24
+      #  port: 4242
+
 # Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
 # however using port 0 will dynamically assign a port and is recommended for roaming nodes.
 listen:
-  # To listen on both any ipv4 and ipv6 use "[::]"
+  # To listen on both any ipv4 and ipv6 use "::"
   host: 0.0.0.0
   port: 4242
   # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
@@ -128,12 +155,15 @@ punchy:
   # Default is false
   #respond: true

-  # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
+  # delays a punch response for misbehaving NATs, default is 1 second.
   #delay: 1s

+  # set the delay before attempting punchy.respond. Default is 5 seconds. respond must be true to take effect.
+  #respond_delay: 5s
+
 # Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
 # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
-#cipher: chachapoly
+#cipher: aes

 # Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
 # path to a network adjacent nebula node.
@@ -141,7 +171,8 @@ punchy:
 # and has been deprecated for "preferred_ranges"
 #preferred_ranges: ["172.16.0.0/24"]

-# sshd can expose informational and administrative functions via ssh this is a
+# sshd can expose informational and administrative functions via ssh. This can expose informational and administrative
+# functions, and allows manual tweaking of various network settings when debugging or testing.
 #sshd:
   # Toggles the feature
   #enabled: true
@@ -177,7 +208,7 @@ tun:
   disabled: false
   # Name of the device. If not set, a default will be chosen by the OS.
   # For macOS: if set, must be in the form `utun[0-9]+`.
-  # For FreeBSD: Required to be set, must be in the form `tun[0-9]+`.
+  # For NetBSD: Required to be set, must be in the form `tun[0-9]+`
   dev: nebula1
   # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
   drop_local_broadcast: false
@@ -187,21 +218,28 @@ tun:
   tx_queue: 500
   # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
   mtu: 1300

   # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
   routes:
     #- mtu: 8800
     #  route: 10.0.0.0/16

   # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
   # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
   # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
-  # `mtu` will default to tun mtu if this option is not specified
-  # `metric` will default to 0 if this option is not specified
+  # `mtu`: will default to tun mtu if this option is not specified
+  # `metric`: will default to 0 if this option is not specified
+  # `install`: will default to true, controls whether this route is installed in the systems routing table.
   unsafe_routes:
     #- route: 172.16.1.0/24
     #  via: 192.168.100.99
     #  mtu: 1300
     #  metric: 100
+    #  install: true
+
+  # On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
+  # in nebula configuration files. Default false, not reloadable.
+  #use_system_route_table: false

 # TODO
 # Configure logging level
@@ -251,6 +289,10 @@ logging:
   # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
   #try_interval: 100ms
   #retries: 20

+  # query_buffer is the size of the buffer channel for querying lighthouses
+  #query_buffer: 64
+
   # trigger_buffer is the size of the buffer channel for quickly sending handshakes
   # after receiving the response for lighthouse queries
   #trigger_buffer: 64
@@ -258,6 +300,15 @@ logging:

 # Nebula security group configuration
 firewall:
+  # Action to take when a packet is not allowed by the firewall rules.
+  # Can be one of:
+  #   `drop` (default): silently drop the packet.
+  #   `reject`: send a reject reply.
+  #     - For TCP, this will be a RST "Connection Reset" packet.
+  #     - For other protocols, this will be an ICMP port unreachable packet.
+  outbound_action: drop
+  inbound_action: drop
+
   conntrack:
     tcp_timeout: 12m
     udp_timeout: 3m
@@ -272,7 +323,8 @@ firewall:
   # host: `any` or a literal hostname, ie `test-host`
   # group: `any` or a literal group name, ie `default-group`
   # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
-  # cidr: a CIDR, `0.0.0.0/0` is any.
+  # cidr: a remote CIDR, `0.0.0.0/0` is any.
+  # local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
   # ca_name: An issuing CA name
   # ca_sha: An issuing CA shasum

examples/go_service/main.go (new file, 100 lines)
@@ -0,0 +1,100 @@
package main

import (
	"bufio"
	"fmt"
	"log"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/service"
)

func main() {
	if err := run(); err != nil {
		log.Fatalf("%+v", err)
	}
}

func run() error {
	configStr := `
tun:
  user: true

static_host_map:
  '192.168.100.1': ['localhost:4242']

listen:
  host: 0.0.0.0
  port: 4241

lighthouse:
  am_lighthouse: false
  interval: 60
  hosts:
    - '192.168.100.1'

firewall:
  outbound:
    # Allow all outbound traffic from this node
    - port: any
      proto: any
      host: any

  inbound:
    # Allow icmp between any nebula hosts
    - port: any
      proto: icmp
      host: any
    - port: any
      proto: any
      host: any

pki:
  ca: /home/rice/Developer/nebula-config/ca.crt
  cert: /home/rice/Developer/nebula-config/app.crt
  key: /home/rice/Developer/nebula-config/app.key
`
	var config config.C
	if err := config.LoadString(configStr); err != nil {
		return err
	}
	service, err := service.New(&config)
	if err != nil {
		return err
	}

	ln, err := service.Listen("tcp", ":1234")
	if err != nil {
		return err
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Printf("accept error: %s", err)
			break
		}
		defer conn.Close()

		log.Printf("got connection")

		conn.Write([]byte("hello world\n"))

		scanner := bufio.NewScanner(conn)
		for scanner.Scan() {
			message := scanner.Text()
			fmt.Fprintf(conn, "echo: %q\n", message)
			log.Printf("got message %q", message)
		}

		if err := scanner.Err(); err != nil {
			log.Printf("scanner error: %s", err)
			break
		}
	}

	service.Close()
	if err := service.Wait(); err != nil {
		return err
	}
	return nil
}
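To exercise the example service above, a peer on the same overlay can dial the listener with nothing but the standard library. The port (1234) comes from the example; the 192.168.100.2 address is only a placeholder for whatever overlay IP the service's certificate actually carries:

	// Hypothetical client, run from another nebula host on the same overlay.
	package main

	import (
		"bufio"
		"fmt"
		"log"
		"net"
	)

	func main() {
		// Dial the echo service over the nebula overlay.
		conn, err := net.Dial("tcp", "192.168.100.2:1234")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		fmt.Fprintln(conn, "ping")

		// Read the greeting and the echoed line back from the service.
		r := bufio.NewReader(conn)
		for i := 0; i < 2; i++ {
			line, err := r.ReadString('\n')
			if err != nil {
				log.Fatal(err)
			}
			fmt.Print(line)
		}
	}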
@@ -1,7 +1,8 @@
 [Unit]
-Description=nebula
-Wants=basic.target
-After=basic.target network.target
+Description=Nebula overlay networking tool
+Wants=basic.target network-online.target nss-lookup.target time-sync.target
+After=basic.target network.target network-online.target
+Before=sshd.service

 [Service]
 SyslogIdentifier=nebula

examples/service_scripts/nebula.plist (new file, 34 lines)
@@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>KeepAlive</key>
    <true/>
    <key>Label</key>
    <string>net.defined.nebula</string>
    <key>WorkingDirectory</key>
    <string>/Users/{username}/.local/bin/nebula</string>
    <key>LimitLoadToSessionType</key>
    <array>
        <string>Aqua</string>
        <string>Background</string>
        <string>LoginWindow</string>
        <string>StandardIO</string>
        <string>System</string>
    </array>
    <key>ProgramArguments</key>
    <array>
        <string>./nebula</string>
        <string>-config</string>
        <string>./config.yml</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>StandardErrorPath</key>
    <string>./nebula.log</string>
    <key>StandardOutPath</key>
    <string>./nebula.log</string>
    <key>UserName</key>
    <string>root</string>
</dict>
</plist>

@@ -1,10 +1,12 @@
 [Unit]
-Description=nebula
-Wants=basic.target
-After=basic.target network.target
+Description=Nebula overlay networking tool
+Wants=basic.target network-online.target nss-lookup.target time-sync.target
+After=basic.target network.target network-online.target
 Before=sshd.service

 [Service]
+Type=notify
+NotifyAccess=main
 SyslogIdentifier=nebula
 ExecReload=/bin/kill -HUP $MAINPID
 ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
firewall.go (170 lines)
@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"hash/fnv"
 	"net"
 	"reflect"
 	"strconv"
@@ -25,7 +26,7 @@ const tcpACK = 0x10
 const tcpFIN = 0x01

 type FirewallInterface interface {
-	AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error
+	AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error
 }

 type conn struct {
@@ -47,6 +48,9 @@ type Firewall struct {
 	InRules  *FirewallTable
 	OutRules *FirewallTable

+	InSendReject  bool
+	OutSendReject bool
+
 	//TODO: we should have many more options for TCP, an option for ICMP, and mimic the kernel a bit better
 	// https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
 	TCPTimeout time.Duration //linux: 5 days max
@@ -54,7 +58,7 @@ type Firewall struct {
 	DefaultTimeout time.Duration //linux: 600s

 	// Used to ensure we don't emit local packets for ips we don't own
-	localIps *cidr.Tree4
+	localIps *cidr.Tree4[struct{}]

 	rules        string
 	rulesVersion uint16
@@ -77,7 +81,7 @@ type FirewallConntrack struct {
 	sync.Mutex

 	Conns      map[firewall.Packet]*conn
-	TimerWheel *TimerWheel
+	TimerWheel *TimerWheel[firewall.Packet]
 }

 type FirewallTable struct {
@@ -103,11 +107,12 @@ type FirewallCA struct {
 }

 type FirewallRule struct {
-	// Any makes Hosts, Groups, and CIDR irrelevant
+	// Any makes Hosts, Groups, CIDR and LocalCIDR irrelevant
 	Any    bool
 	Hosts  map[string]struct{}
 	Groups [][]string
-	CIDR   *cidr.Tree4
+	CIDR      *cidr.Tree4[struct{}]
+	LocalCIDR *cidr.Tree4[struct{}]
 }

 // Even though ports are uint16, int32 maps are faster for lookup
@@ -133,7 +138,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
 		max = defaultTimeout
 	}

-	localIps := cidr.NewTree4()
+	localIps := cidr.NewTree4[struct{}]()
 	for _, ip := range c.Details.Ips {
 		localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
 	}
@@ -145,7 +150,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
 	return &Firewall{
 		Conntrack: &FirewallConntrack{
 			Conns:      make(map[firewall.Packet]*conn),
-			TimerWheel: NewTimerWheel(min, max),
+			TimerWheel: NewTimerWheel[firewall.Packet](min, max),
 		},
 		InRules:  newFirewallTable(),
 		OutRules: newFirewallTable(),
@@ -179,6 +184,28 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
 		//TODO: max_connections
 	)

+	inboundAction := c.GetString("firewall.inbound_action", "drop")
+	switch inboundAction {
+	case "reject":
+		fw.InSendReject = true
+	case "drop":
+		fw.InSendReject = false
+	default:
+		l.WithField("action", inboundAction).Warn("invalid firewall.inbound_action, defaulting to `drop`")
+		fw.InSendReject = false
+	}
+
+	outboundAction := c.GetString("firewall.outbound_action", "drop")
+	switch outboundAction {
+	case "reject":
+		fw.OutSendReject = true
+	case "drop":
+		fw.OutSendReject = false
+	default:
+		l.WithField("action", inboundAction).Warn("invalid firewall.outbound_action, defaulting to `drop`")
+		fw.OutSendReject = false
+	}
+
 	err := AddFirewallRulesFromConfig(l, false, c, fw)
 	if err != nil {
 		return nil, err
@@ -193,18 +220,22 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
 }

 // AddRule properly creates the in memory rule structure for a firewall table.
-func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
+func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
 	// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
 	// https://github.com/golang/go/issues/14131
 	sIp := ""
 	if ip != nil {
 		sIp = ip.String()
 	}
+	lIp := ""
+	if localIp != nil {
+		lIp = localIp.String()
+	}

 	// We need this rule string because we generate a hash. Removing this will break firewall reload.
 	ruleString := fmt.Sprintf(
-		"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s",
-		incoming, proto, startPort, endPort, groups, host, sIp, caName, caSha,
+		"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, localIp: %v, caName: %v, caSha: %s",
+		incoming, proto, startPort, endPort, groups, host, sIp, lIp, caName, caSha,
 	)
 	f.rules += ruleString + "\n"

@@ -212,7 +243,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
 	if !incoming {
 		direction = "outgoing"
 	}
-	f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "caName": caName, "caSha": caSha}).
+	f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "localIp": lIp, "caName": caName, "caSha": caSha}).
 		Info("Firewall rule added")

 	var (
@@ -239,7 +270,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
 		return fmt.Errorf("unknown protocol %v", proto)
 	}

-	return fp.addRule(startPort, endPort, groups, host, ip, caName, caSha)
+	return fp.addRule(startPort, endPort, groups, host, ip, localIp, caName, caSha)
 }

 // GetRuleHash returns a hash representation of all inbound and outbound rules
@@ -248,6 +279,18 @@ func (f *Firewall) GetRuleHash() string {
 	return hex.EncodeToString(sum[:])
 }

+// GetRuleHashFNV returns a uint32 FNV-1 hash representation the rules, for use as a metric value
+func (f *Firewall) GetRuleHashFNV() uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(f.rules))
+	return h.Sum32()
+}
+
+// GetRuleHashes returns both the sha256 and FNV-1 hashes, suitable for logging
+func (f *Firewall) GetRuleHashes() string {
+	return "SHA:" + f.GetRuleHash() + ",FNV:" + strconv.FormatUint(uint64(f.GetRuleHashFNV()), 10)
+}
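The FNV hash added above exists so the full rule text can be exported as a numeric gauge (the firewall.rules.hash metric later in this diff) and compared across hosts to spot configuration drift. A standalone sketch of the same idea, using only the standard library:

	package main

	import (
		"fmt"
		"hash/fnv"
	)

	// ruleFingerprint mirrors what GetRuleHashFNV does: collapse the rule text
	// into a uint32 that is cheap to publish as a metrics gauge.
	func ruleFingerprint(rules string) uint32 {
		h := fnv.New32a()
		h.Write([]byte(rules))
		return h.Sum32()
	}

	func main() {
		a := ruleFingerprint("incoming: true, proto: 6, startPort: 443, endPort: 443, groups: [web]\n")
		b := ruleFingerprint("incoming: true, proto: 6, startPort: 443, endPort: 443, groups: [db]\n")
		fmt.Println(a == b) // false: any change to the rule text changes the gauge value
	}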
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
|
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
|
||||||
var table string
|
var table string
|
||||||
if inbound {
|
if inbound {
|
||||||
@@ -277,8 +320,8 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
|||||||
return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i)
|
return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.CAName == "" && r.CASha == "" {
|
if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.LocalCidr == "" && r.CAName == "" && r.CASha == "" {
|
||||||
return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, ca_name, or ca_sha must be provided", table, i)
|
return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided", table, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(r.Groups) > 0 {
|
if len(r.Groups) > 0 {
|
||||||
@@ -330,7 +373,15 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, r.CAName, r.CASha)
|
var localCidr *net.IPNet
|
||||||
|
if r.LocalCidr != "" {
|
||||||
|
_, localCidr, err = net.ParseCIDR(r.LocalCidr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, localCidr, r.CAName, r.CASha)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("%s rule #%v; `%s`", table, i, err)
|
return fmt.Errorf("%s rule #%v; `%s`", table, i, err)
|
||||||
}
|
}
|
||||||
@@ -353,7 +404,8 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
|
|||||||
|
|
||||||
// Make sure remote address matches nebula certificate
|
// Make sure remote address matches nebula certificate
|
||||||
if remoteCidr := h.remoteCidr; remoteCidr != nil {
|
if remoteCidr := h.remoteCidr; remoteCidr != nil {
|
||||||
if remoteCidr.Contains(fp.RemoteIP) == nil {
|
ok, _ := remoteCidr.Contains(fp.RemoteIP)
|
||||||
|
if !ok {
|
||||||
f.metrics(incoming).droppedRemoteIP.Inc(1)
|
f.metrics(incoming).droppedRemoteIP.Inc(1)
|
||||||
return ErrInvalidRemoteIP
|
return ErrInvalidRemoteIP
|
||||||
}
|
}
|
||||||
@@ -366,7 +418,8 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Make sure we are supposed to be handling this local ip address
|
// Make sure we are supposed to be handling this local ip address
|
||||||
if f.localIps.Contains(fp.LocalIP) == nil {
|
ok, _ := f.localIps.Contains(fp.LocalIP)
|
||||||
|
if !ok {
|
||||||
f.metrics(incoming).droppedLocalIP.Inc(1)
|
f.metrics(incoming).droppedLocalIP.Inc(1)
|
||||||
return ErrInvalidLocalIP
|
return ErrInvalidLocalIP
|
||||||
}
|
}
|
||||||
@@ -409,6 +462,7 @@ func (f *Firewall) EmitStats() {
|
|||||||
conntrack.Unlock()
|
conntrack.Unlock()
|
||||||
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
|
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
|
||||||
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
|
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
|
||||||
|
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
|
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
|
||||||
@@ -510,6 +564,7 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
|
|||||||
conntrack := f.Conntrack
|
conntrack := f.Conntrack
|
||||||
conntrack.Lock()
|
conntrack.Lock()
|
||||||
if _, ok := conntrack.Conns[fp]; !ok {
|
if _, ok := conntrack.Conns[fp]; !ok {
|
||||||
|
conntrack.TimerWheel.Advance(time.Now())
|
||||||
conntrack.TimerWheel.Add(fp, timeout)
|
conntrack.TimerWheel.Add(fp, timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -537,6 +592,7 @@ func (f *Firewall) evict(p firewall.Packet) {
|
|||||||
|
|
||||||
// Timeout is in the future, re-add the timer
|
// Timeout is in the future, re-add the timer
|
||||||
if newT > 0 {
|
if newT > 0 {
|
||||||
|
conntrack.TimerWheel.Advance(time.Now())
|
||||||
conntrack.TimerWheel.Add(p, newT)
|
conntrack.TimerWheel.Add(p, newT)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -568,7 +624,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
|
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
|
||||||
if startPort > endPort {
|
if startPort > endPort {
|
||||||
return fmt.Errorf("start port was lower than end port")
|
return fmt.Errorf("start port was lower than end port")
|
||||||
}
|
}
|
||||||
@@ -581,7 +637,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := fp[i].addRule(groups, host, ip, caName, caSha); err != nil {
|
if err := fp[i].addRule(groups, host, ip, localIp, caName, caSha); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -612,12 +668,13 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
|
|||||||
return fp[firewall.PortAny].match(p, c, caPool)
|
return fp[firewall.PortAny].match(p, c, caPool)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error {
|
func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error {
|
||||||
fr := func() *FirewallRule {
|
fr := func() *FirewallRule {
|
||||||
return &FirewallRule{
|
return &FirewallRule{
|
||||||
Hosts: make(map[string]struct{}),
|
Hosts: make(map[string]struct{}),
|
||||||
Groups: make([][]string, 0),
|
Groups: make([][]string, 0),
|
||||||
CIDR: cidr.NewTree4(),
|
CIDR: cidr.NewTree4[struct{}](),
|
||||||
|
LocalCIDR: cidr.NewTree4[struct{}](),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -626,14 +683,14 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
|
|||||||
fc.Any = fr()
|
fc.Any = fr()
|
||||||
}
|
}
|
||||||
|
|
||||||
return fc.Any.addRule(groups, host, ip)
|
return fc.Any.addRule(groups, host, ip, localIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
if caSha != "" {
|
if caSha != "" {
|
||||||
if _, ok := fc.CAShas[caSha]; !ok {
|
if _, ok := fc.CAShas[caSha]; !ok {
|
||||||
fc.CAShas[caSha] = fr()
|
fc.CAShas[caSha] = fr()
|
||||||
}
|
}
|
||||||
err := fc.CAShas[caSha].addRule(groups, host, ip)
|
err := fc.CAShas[caSha].addRule(groups, host, ip, localIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -643,7 +700,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
|
|||||||
if _, ok := fc.CANames[caName]; !ok {
|
if _, ok := fc.CANames[caName]; !ok {
|
||||||
fc.CANames[caName] = fr()
|
fc.CANames[caName] = fr()
|
||||||
}
|
}
|
||||||
err := fc.CANames[caName].addRule(groups, host, ip)
|
err := fc.CANames[caName].addRule(groups, host, ip, localIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -675,17 +732,18 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
|
|||||||
return fc.CANames[s.Details.Name].match(p, c)
|
return fc.CANames[s.Details.Name].match(p, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) error {
|
func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, localIp *net.IPNet) error {
|
||||||
if fr.Any {
|
if fr.Any {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if fr.isAny(groups, host, ip) {
|
if fr.isAny(groups, host, ip, localIp) {
|
||||||
fr.Any = true
|
fr.Any = true
|
||||||
// If it's any we need to wipe out any pre-existing rules to save on memory
|
// If it's any we need to wipe out any pre-existing rules to save on memory
|
||||||
fr.Groups = make([][]string, 0)
|
fr.Groups = make([][]string, 0)
|
||||||
fr.Hosts = make(map[string]struct{})
|
fr.Hosts = make(map[string]struct{})
|
||||||
fr.CIDR = cidr.NewTree4()
|
fr.CIDR = cidr.NewTree4[struct{}]()
|
||||||
|
fr.LocalCIDR = cidr.NewTree4[struct{}]()
|
||||||
} else {
|
} else {
|
||||||
if len(groups) > 0 {
|
if len(groups) > 0 {
|
||||||
fr.Groups = append(fr.Groups, groups)
|
fr.Groups = append(fr.Groups, groups)
|
||||||
@@ -698,13 +756,17 @@ func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) err
|
|||||||
if ip != nil {
|
if ip != nil {
|
||||||
fr.CIDR.AddCIDR(ip, struct{}{})
|
fr.CIDR.AddCIDR(ip, struct{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if localIp != nil {
|
||||||
|
fr.LocalCIDR.AddCIDR(localIp, struct{}{})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool {
|
func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPNet) bool {
|
||||||
if len(groups) == 0 && host == "" && ip == nil {
|
if len(groups) == 0 && host == "" && ip == nil && localIp == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -722,6 +784,10 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -759,8 +825,18 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if fr.CIDR != nil && fr.CIDR.Contains(p.RemoteIP) != nil {
|
if fr.CIDR != nil {
|
||||||
return true
|
ok, _ := fr.CIDR.Contains(p.RemoteIP)
|
||||||
|
if ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fr.LocalCIDR != nil {
|
||||||
|
ok, _ := fr.LocalCIDR.Contains(p.LocalIP)
|
||||||
|
if ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// No host, group, or cidr matched, bye bye
|
// No host, group, or cidr matched, bye bye
|
||||||
@@ -768,15 +844,16 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
type rule struct {
|
type rule struct {
|
||||||
Port string
|
Port string
|
||||||
Code string
|
Code string
|
||||||
Proto string
|
Proto string
|
||||||
Host string
|
Host string
|
||||||
Group string
|
Group string
|
||||||
Groups []string
|
Groups []string
|
||||||
Cidr string
|
Cidr string
|
||||||
CAName string
|
LocalCidr string
|
||||||
CASha string
|
CAName string
|
||||||
|
CASha string
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
||||||
@@ -800,6 +877,7 @@ func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, er
|
|||||||
r.Proto = toString("proto", m)
|
r.Proto = toString("proto", m)
|
||||||
r.Host = toString("host", m)
|
r.Host = toString("host", m)
|
||||||
r.Cidr = toString("cidr", m)
|
r.Cidr = toString("cidr", m)
|
||||||
|
r.LocalCidr = toString("local_cidr", m)
|
||||||
r.CAName = toString("ca_name", m)
|
r.CAName = toString("ca_name", m)
|
||||||
r.CASha = toString("ca_sha", m)
|
r.CASha = toString("ca_sha", m)
|
||||||
|
|
||||||
@@ -879,7 +957,7 @@ func parsePort(s string) (startPort, endPort int32, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: write tests for these
|
// TODO: write tests for these
|
||||||
func setTCPRTTTracking(c *conn, p []byte) {
|
func setTCPRTTTracking(c *conn, p []byte) {
|
||||||
if c.Seq != 0 {
|
if c.Seq != 0 {
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ type ConntrackCache map[Packet]struct{}

 type ConntrackCacheTicker struct {
 	cacheV    uint64
-	cacheTick uint64
+	cacheTick atomic.Uint64

 	cache ConntrackCache
 }
@@ -35,7 +35,7 @@ func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
 func (c *ConntrackCacheTicker) tick(d time.Duration) {
 	for {
 		time.Sleep(d)
-		atomic.AddUint64(&c.cacheTick, 1)
+		c.cacheTick.Add(1)
 	}
 }

@@ -45,7 +45,7 @@ func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
 	if c == nil {
 		return nil
 	}
-	if tick := atomic.LoadUint64(&c.cacheTick); tick != c.cacheV {
+	if tick := c.cacheTick.Load(); tick != c.cacheV {
 		c.cacheV = tick
 		if ll := len(c.cache); ll > 0 {
 			if l.Level == logrus.DebugLevel {
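The change above swaps manual atomic.AddUint64/LoadUint64 calls on a plain uint64 field for the atomic.Uint64 wrapper type, the idiom available since Go 1.19. A minimal standalone illustration of the same pattern, not tied to the nebula code:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// counter shows the wrapper style: the type guarantees correct alignment and
	// makes every access atomic without passing &field around.
	type counter struct {
		ticks atomic.Uint64
	}

	func main() {
		var c counter
		var wg sync.WaitGroup
		for i := 0; i < 8; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := 0; j < 1000; j++ {
					c.ticks.Add(1)
				}
			}()
		}
		wg.Wait()
		fmt.Println(c.ticks.Load()) // 8000
	}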
215
firewall_test.go
215
firewall_test.go
@@ -34,27 +34,27 @@ func TestNewFirewall(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
|
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
|
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
|
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
|
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
|
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_AddRule(t *testing.T) {
|
func TestFirewall_AddRule(t *testing.T) {
|
||||||
@@ -69,67 +69,79 @@ func TestFirewall_AddRule(t *testing.T) {
|
|||||||
|
|
||||||
_, ti, _ := net.ParseCIDR("1.2.3.4/32")
|
_, ti, _ := net.ParseCIDR("1.2.3.4/32")
|
||||||
|
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", ""))
|
||||||
// An empty rule is any
|
// An empty rule is any
|
||||||
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", ""))
|
||||||
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
||||||
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
||||||
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", ""))
|
||||||
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
||||||
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", ""))
|
||||||
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
||||||
-	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
+	ok, _ := fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))
+	assert.True(t, ok)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
+	assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
+	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
+	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
+	ok, _ = fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP))
+	assert.True(t, ok)
+
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
 	assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha"))
 	assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
 
 	// Set any and clear fields
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", ""))
 	assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
 	assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
-	assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
+	ok, _ = fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))
+	assert.True(t, ok)
+	ok, _ = fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP))
+	assert.True(t, ok)
 
 	// run twice just to make sure
 	//TODO: these ANY rules should clear the CA firewall portion
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
 	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
 	assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
 	assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
 	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
 	_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))
 	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
 
 	// Test error conditions
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", ""))
-	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, "", ""))
+	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", ""))
+	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", ""))
 }
 
 func TestFirewall_Drop(t *testing.T) {
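The hunks above widen Firewall.AddRule with an extra *net.IPNet argument (the local-CIDR restriction) and add assertions against the new LocalCIDR rule field. As a minimal sketch only, assuming the 10-argument signature exercised by these tests (the argument after the existing remote-CIDR *net.IPNet is the local CIDR), a caller could scope a rule to both a remote and a local network; the networks, ports, and group name here are illustrative, not part of this change.

package nebula

import (
	"net"

	"github.com/slackhq/nebula/firewall"
)

// addScopedRule is a sketch, not part of this change: the 8th argument (localNet)
// limits the rule to packets whose local/destination address falls inside
// 10.2.0.0/16, alongside the existing remote-CIDR restriction (remoteNet).
func addScopedRule(fw *Firewall) error {
	_, remoteNet, _ := net.ParseCIDR("10.1.0.0/16")
	_, localNet, _ := net.ParseCIDR("10.2.0.0/16")
	return fw.AddRule(true, firewall.ProtoTCP, 443, 443, []string{"web"}, "", remoteNet, localNet, "", "")
}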
@@ -138,12 +150,12 @@ func TestFirewall_Drop(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		10,
-		90,
-		firewall.ProtoUDP,
-		false,
+		LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalPort: 10,
+		RemotePort: 90,
+		Protocol: firewall.ProtoUDP,
+		Fragment: false,
 	}
 
 	ipNet := net.IPNet{
@@ -169,7 +181,7 @@ func TestFirewall_Drop(t *testing.T) {
 	h.CreateRemoteCIDR(&c)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
 	cp := cert.NewCAPool()
 
 	// Drop outbound
@@ -188,28 +200,28 @@ func TestFirewall_Drop(t *testing.T) {
 
 	// ensure signer doesn't get in the way of group checks
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum"))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad"))
 	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
 
 	// test caSha doesn't drop on match
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad"))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum"))
 	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
 
 	// ensure ca name doesn't get in the way of group checks
 	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", ""))
 	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
 
 	// test caName doesn't drop on match
 	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", ""))
 	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
 }
 
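The Drop tests above also switch firewall.Packet literals from positional to named fields, which makes the local/remote split explicit. A small sketch, using only field and helper names that appear in this diff (addresses and ports are arbitrary), shows the named-field form; a rule check then looks like the calls above, e.g. fw.Drop([]byte{}, p, true, &h, cp, nil).

package nebula

import (
	"net"

	"github.com/slackhq/nebula/firewall"
	"github.com/slackhq/nebula/iputil"
)

// examplePacket is a sketch only: LocalIP/LocalPort describe this host's side of
// the flow and RemoteIP/RemotePort the peer's side, which is what the new
// local_cidr rules key off of.
func examplePacket() firewall.Packet {
	return firewall.Packet{
		LocalIP:    iputil.Ip2VpnIp(net.IPv4(10, 2, 0, 1)),
		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(10, 1, 0, 2)),
		LocalPort:  443,
		RemotePort: 51000,
		Protocol:   firewall.ProtoTCP,
		Fragment:   false,
	}
}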
@@ -219,11 +231,11 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 	}
 
 	_, n, _ := net.ParseCIDR("172.1.1.1/32")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, "", "")
+	_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "")
+	_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "")
+	_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "")
+	_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "")
+	_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "")
 	cp := cert.NewCAPool()
 
 	b.Run("fail on proto", func(b *testing.B) {
@@ -291,7 +303,20 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 		}
 	})
 
-	_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "")
+	b.Run("pass on local ip", func(b *testing.B) {
+		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
+		c := &cert.NebulaCertificate{
+			Details: cert.NebulaCertificateDetails{
+				InvertedGroups: map[string]struct{}{"nope": {}},
+				Name: "good-host",
+			},
+		}
+		for n := 0; n < b.N; n++ {
+			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
+		}
+	})
+
+	_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
 
 	b.Run("pass on ip with any port", func(b *testing.B) {
 		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
@@ -305,6 +330,19 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
 		}
 	})
+
+	b.Run("pass on local ip with any port", func(b *testing.B) {
+		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
+		c := &cert.NebulaCertificate{
+			Details: cert.NebulaCertificateDetails{
+				InvertedGroups: map[string]struct{}{"nope": {}},
+				Name: "good-host",
+			},
+		}
+		for n := 0; n < b.N; n++ {
+			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
+		}
+	})
 }
 
 func TestFirewall_Drop2(t *testing.T) {
@@ -313,12 +351,12 @@ func TestFirewall_Drop2(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		10,
-		90,
-		firewall.ProtoUDP,
-		false,
+		LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalPort: 10,
+		RemotePort: 90,
+		Protocol: firewall.ProtoUDP,
+		Fragment: false,
 	}
 
 	ipNet := net.IPNet{
@@ -356,7 +394,7 @@ func TestFirewall_Drop2(t *testing.T) {
 	h1.CreateRemoteCIDR(&c1)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", ""))
 	cp := cert.NewCAPool()
 
 	// h1/c1 lacks the proper groups
@@ -372,12 +410,12 @@ func TestFirewall_Drop3(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		1,
-		1,
-		firewall.ProtoUDP,
-		false,
+		LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalPort: 1,
+		RemotePort: 1,
+		Protocol: firewall.ProtoUDP,
+		Fragment: false,
 	}
 
 	ipNet := net.IPNet{
@@ -438,8 +476,8 @@ func TestFirewall_Drop3(t *testing.T) {
 	h3.CreateRemoteCIDR(&c3)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, "", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha"))
 	cp := cert.NewCAPool()
 
 	// c1 should pass because host match
@@ -458,12 +496,12 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		10,
-		90,
-		firewall.ProtoUDP,
-		false,
+		LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalPort: 10,
+		RemotePort: 90,
+		Protocol: firewall.ProtoUDP,
+		Fragment: false,
 	}
 
 	ipNet := net.IPNet{
@@ -489,7 +527,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 	h.CreateRemoteCIDR(&c)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
 	cp := cert.NewCAPool()
 
 	// Drop outbound
@@ -502,7 +540,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 
 	oldFw := fw
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", ""))
 	fw.Conntrack = oldFw.Conntrack
 	fw.rulesVersion = oldFw.rulesVersion + 1
 
@@ -511,7 +549,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 
 	oldFw = fw
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", ""))
 	fw.Conntrack = oldFw.Conntrack
 	fw.rulesVersion = oldFw.rulesVersion + 1
 
@@ -653,7 +691,7 @@ func TestNewFirewallFromConfig(t *testing.T) {
 	conf = config.NewC(l)
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
 	_, err = NewFirewallFromConfig(l, c, conf)
-	assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided")
+	assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided")
 
 	// Test code/port error
 	conf = config.NewC(l)
@@ -677,6 +715,12 @@ func TestNewFirewallFromConfig(t *testing.T) {
 	_, err = NewFirewallFromConfig(l, c, conf)
 	assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
 
+	// Test local_cidr parse error
+	conf = config.NewC(l)
+	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
+	_, err = NewFirewallFromConfig(l, c, conf)
+	assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh")
+
 	// Test both group and groups
 	conf = config.NewC(l)
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
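Per the updated error message and the new parse-error test above, a firewall rule may now name a local_cidr alongside (or instead of) the existing cidr key. A minimal sketch, mirroring how these tests build config in Go (the port, network, and the exact NewFirewallFromConfig parameter types are assumptions for illustration):

package nebula

import (
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/config"
)

// firewallWithLocalCIDR is a sketch only: an inbound rule that applies solely to
// traffic whose local (destination) address is inside 10.2.0.0/16. "local_cidr"
// is the new counterpart to the existing "cidr" key.
func firewallWithLocalCIDR(l *logrus.Logger, c *cert.NebulaCertificate) (*Firewall, error) {
	conf := config.NewC(l)
	conf.Settings["firewall"] = map[interface{}]interface{}{
		"inbound": []interface{}{
			map[interface{}]interface{}{"port": "443", "proto": "tcp", "local_cidr": "10.2.0.0/16"},
		},
	}
	return NewFirewallFromConfig(l, c, conf)
}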
@@ -691,63 +735,78 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
 	mf := &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
 
 	// Test adding udp rule
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
 
 	// Test adding icmp rule
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
 
 	// Test adding any rule
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
+
+	// Test adding rule with cidr
+	cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}
+	conf = config.NewC(l)
+	mf = &mockFirewall{}
+	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
+	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall)
+
+	// Test adding rule with local_cidr
+	conf = config.NewC(l)
+	mf = &mockFirewall{}
+	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
+	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall)
+
 	// Test adding rule with ca_sha
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall)
 
 	// Test adding rule with ca_name
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall)
 
 	// Test single group
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
 
 	// Test single groups
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
 
 	// Test multiple AND groups
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall)
 
 	// Test Add error
 	conf = config.NewC(l)
@@ -892,6 +951,7 @@ type addRuleCall struct {
 	groups    []string
 	host      string
 	ip        *net.IPNet
+	localIp   *net.IPNet
 	caName    string
 	caSha     string
 }
@@ -901,7 +961,7 @@ type mockFirewall struct {
 	nextCallReturn error
 }
 
-func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
+func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
 	mf.lastCall = addRuleCall{
 		incoming:  incoming,
 		proto:     proto,
@@ -910,6 +970,7 @@ func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, end
 		groups:    groups,
 		host:      host,
 		ip:        ip,
+		localIp:   localIp,
 		caName:    caName,
 		caSha:     caSha,
 	}
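Taken together, the test changes above add a local-CIDR dimension to rule definition, config parsing, and matching. A hedged sketch of the negative case this implies (not a test from this change; fw, h, and cp are assumed to be set up as in the tests above, and the HostInfo/NebulaCAPool types are assumptions):

package nebula

import (
	"net"

	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/firewall"
	"github.com/slackhq/nebula/iputil"
)

// localCIDRMismatch is a sketch only: with a single rule scoped to local network
// 10.2.0.0/16, a packet destined for an address outside that range should find
// no matching rule, so Drop is expected to return ErrNoMatchingRule (assuming
// local_cidr restricts matching the way the rule tests above suggest).
func localCIDRMismatch(fw *Firewall, h *HostInfo, cp *cert.NebulaCAPool) error {
	_, localNet, _ := net.ParseCIDR("10.2.0.0/16")
	if err := fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{}, "", nil, localNet, "", ""); err != nil {
		return err
	}
	p := firewall.Packet{
		LocalIP:  iputil.Ip2VpnIp(net.IPv4(192, 168, 1, 1)), // outside 10.2.0.0/16
		RemoteIP: iputil.Ip2VpnIp(net.IPv4(10, 1, 0, 2)),
		Protocol: firewall.ProtoUDP,
	}
	return fw.Drop([]byte{}, p, true, h, cp, nil)
}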
57  go.mod
@@ -1,48 +1,53 @@
 module github.com/slackhq/nebula
 
-go 1.18
+go 1.20
 
 require (
+	dario.cat/mergo v1.0.0
 	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
 	github.com/armon/go-radix v1.0.0
 	github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
-	github.com/flynn/noise v1.0.0
+	github.com/flynn/noise v1.0.1
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/gopacket v1.1.19
-	github.com/imdario/mergo v0.3.8
-	github.com/kardianos/service v1.2.1
-	github.com/miekg/dns v1.1.48
+	github.com/kardianos/service v1.2.2
+	github.com/miekg/dns v1.1.56
 	github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
-	github.com/prometheus/client_golang v1.12.1
+	github.com/prometheus/client_golang v1.17.0
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
-	github.com/sirupsen/logrus v1.8.1
+	github.com/sirupsen/logrus v1.9.3
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
-	github.com/stretchr/testify v1.7.1
-	github.com/vishvananda/netlink v1.1.0
-	golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29
-	golang.org/x/net v0.0.0-20220403103023-749bd193bc2b
-	golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71
-	golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
+	github.com/stretchr/testify v1.8.4
+	github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54
+	golang.org/x/crypto v0.17.0
+	golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
+	golang.org/x/net v0.19.0
+	golang.org/x/sync v0.5.0
+	golang.org/x/sys v0.15.0
+	golang.org/x/term v0.15.0
+	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
+	golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
 	golang.zx2c4.com/wireguard/windows v0.5.3
-	google.golang.org/protobuf v1.28.0
+	google.golang.org/protobuf v1.31.0
 	gopkg.in/yaml.v2 v2.4.0
+	gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/btree v1.0.1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.33.0 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
-	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
-	golang.org/x/tools v0.1.10 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/vishvananda/netns v0.0.4 // indirect
+	golang.org/x/mod v0.12.0 // indirect
+	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
+	golang.org/x/tools v0.13.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
452
go.sum
452
go.sum
@@ -1,38 +1,6 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
|
||||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
|
||||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
|
||||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
|
||||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
|
||||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
|
||||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
|
||||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
|
||||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
|
||||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
|
||||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
|
||||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
|
||||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
|
||||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
|
||||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
|
||||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
|
||||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
|
||||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
|
||||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
|
||||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
|
||||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
|
||||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
|
||||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
|
||||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
|
||||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
|
||||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
|
||||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
|
||||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
|
||||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
@@ -46,134 +14,78 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
|||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
|
||||||
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
|
||||||
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
|
||||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
|
||||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
|
||||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
|
|
||||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
|
||||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk=
+github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
-github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
+github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
-github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.48 h1:Ucfr7IIVyMBz4lRE8qmGUuZ4Wt3/ZGu9hmcMT3Uu4tQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
+github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg=
@@ -187,33 +99,32 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
-github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
@@ -224,334 +135,111 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
+github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54 h1:8mhqcHPqTMhSPoslhGYihEgSfc77+7La1P6kiB6+9So=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
-github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o=
+golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
-golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
-golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0=
-golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 h1:PRD0hj6tTuUnCFD08vkvjkYFbQg/9lV8KIxe1y4/cvU=
-golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
-golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
-golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY=
+golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
-golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
+golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
+golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo=
+golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -560,15 +248,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f h1:8GE2MRjGiFmfpon8dekPI08jEuNMQzSffVHgdupcO4E=
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f/go.mod h1:pzr6sy8gDLfVmDAg8OYrlKvGEHw5C3PGTiBXBTCx76Q=
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
|
||||||
|
|||||||
31 handshake.go
@@ -1,31 +0,0 @@
-package nebula
-
-import (
-	"github.com/slackhq/nebula/header"
-	"github.com/slackhq/nebula/udp"
-)
-
-func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H, hostinfo *HostInfo) {
-	// First remote allow list check before we know the vpnIp
-	if addr != nil {
-		if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
-			f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
-			return
-		}
-	}
-
-	switch h.Subtype {
-	case header.HandshakeIXPSK0:
-		switch h.MessageCounter {
-		case 1:
-			ixHandshakeStage1(f, addr, via, packet, h)
-		case 2:
-			newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
-			tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
-			if tearDown && newHostinfo != nil {
-				f.handshakeManager.DeleteHostInfo(newHostinfo)
-			}
-		}
-	}
-
-}
192 handshake_ix.go
@@ -1,10 +1,10 @@
 package nebula
 
 import (
-	"sync/atomic"
 	"time"
 
 	"github.com/flynn/noise"
+	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/header"
 	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/udp"
@@ -14,27 +14,22 @@ import (
 
 // This function constructs a handshake packet, but does not actually send it
 // Sending is done by the handshake manager
-func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
+func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
-	// This queries the lighthouse if we don't know a remote for the host
+	err := f.handshakeManager.allocateIndex(hh)
-	// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
-	// more quickly, effect is a quicker handshake.
-	if hostinfo.remote == nil {
-		f.lightHouse.QueryServer(vpnIp, f)
-	}
-
-	err := f.handshakeManager.AddIndexHostInfo(hostinfo)
 	if err != nil {
-		f.l.WithError(err).WithField("vpnIp", vpnIp).
+		f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
 			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
-		return
+		return false
 	}
 
-	ci := hostinfo.ConnectionState
+	certState := f.pki.GetCertState()
+	ci := NewConnectionState(f.l, f.cipher, certState, true, noise.HandshakeIX, []byte{}, 0)
+	hh.hostinfo.ConnectionState = ci
 
 	hsProto := &NebulaHandshakeDetails{
-		InitiatorIndex: hostinfo.localIndexId,
+		InitiatorIndex: hh.hostinfo.localIndexId,
 		Time:           uint64(time.Now().UnixNano()),
-		Cert:           ci.certState.rawCertificateNoKey,
+		Cert:           certState.RawCertificateNoKey,
 	}
 
 	hsBytes := []byte{}
@@ -45,32 +40,33 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
 	hsBytes, err = hs.Marshal()
 
 	if err != nil {
-		f.l.WithError(err).WithField("vpnIp", vpnIp).
+		f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
 			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
-		return
+		return false
 	}
 
 	h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
-	atomic.AddUint64(&ci.atomicMessageCounter, 1)
+	ci.messageCounter.Add(1)
 
 	msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
 	if err != nil {
-		f.l.WithError(err).WithField("vpnIp", vpnIp).
+		f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
 			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
-		return
+		return false
 	}
 
 	// We are sending handshake packet 1, so we don't expect to receive
 	// handshake packet 1 from the responder
 	ci.window.Update(f.l, 1)
 
-	hostinfo.HandshakePacket[0] = msg
+	hh.hostinfo.HandshakePacket[0] = msg
-	hostinfo.HandshakeReady = true
+	hh.ready = true
-	hostinfo.handshakeStart = time.Now()
+	return true
 }
 
-func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H) {
+func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
-	ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
+	certState := f.pki.GetCertState()
+	ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)
 	// Mark packet 1 as seen so it doesn't show up as missed
 	ci.window.Update(f.l, 1)
 
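Note: one mechanical change running through these hunks is the message counter moving from a plain `uint64` driven by package-level `sync/atomic` calls (`atomic.AddUint64(&ci.atomicMessageCounter, 1)`) to the method set of Go 1.19's `atomic.Uint64` type (`ci.messageCounter.Add(1)`, later `Store(2)`). A minimal self-contained sketch of the two styles; the struct names here are illustrative, not Nebula's actual ConnectionState layout:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Old style: a plain uint64 guarded by package-level atomic helpers.
type oldState struct {
	atomicMessageCounter uint64
}

// New style: atomic.Uint64 carries its own methods and cannot be
// read or written non-atomically by accident.
type newState struct {
	messageCounter atomic.Uint64
}

func main() {
	o := &oldState{}
	atomic.AddUint64(&o.atomicMessageCounter, 1)
	fmt.Println(atomic.LoadUint64(&o.atomicMessageCounter)) // 1

	n := &newState{}
	n.messageCounter.Add(1)
	n.messageCounter.Store(2) // mirrors messageCounter.Store(2) after handshake completion
	fmt.Println(n.messageCounter.Load()) // 2
}
```

The typed field also removes the risk of a plain, non-atomic access slipping in, which is the usual motivation for this migration.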
@@ -92,7 +88,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 		return
 	}
 
-	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
+	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
 	if err != nil {
 		f.l.WithError(err).WithField("udpAddr", addr).
 			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
@@ -144,9 +140,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 		},
 	}
 
-	hostinfo.Lock()
-	defer hostinfo.Unlock()
-
 	f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
 		WithField("certName", certName).
 		WithField("fingerprint", fingerprint).
@@ -156,7 +149,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 		Info("Handshake message received")
 
 	hs.Details.ResponderIndex = myIndex
-	hs.Details.Cert = ci.certState.rawCertificateNoKey
+	hs.Details.Cert = certState.RawCertificateNoKey
 	// Update the time in case their clock is way off from ours
 	hs.Details.Time = uint64(time.Now().UnixNano())
 
@@ -208,25 +201,16 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 	hostinfo.SetRemote(addr)
 	hostinfo.CreateRemoteCIDR(remoteCert)
 
-	// Only overwrite existing record if we should win the handshake race
+	existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
-	overwrite := vpnIp > f.myVpnIp
-	existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f)
 	if err != nil {
 		switch err {
 		case ErrAlreadySeen:
-			// Update remote if preferred (Note we have to switch to locking
-			// the existing hostinfo, and then switch back so the defer Unlock
-			// higher in this function still works)
-			hostinfo.Unlock()
-			existing.Lock()
 			// Update remote if preferred
 			if existing.SetRemoteIfPreferred(f.hostMap, addr) {
 				// Send a test packet to ensure the other side has also switched to
 				// the preferred remote
 				f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
 			}
-			existing.Unlock()
-			hostinfo.Lock()
 
 			msg = existing.HandshakePacket[2]
 			f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
@@ -243,14 +227,13 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 			}
 			return
 		} else {
-			via2 := via.(*ViaSender)
+			if via == nil {
-			if via2 == nil {
 				f.l.Error("Handshake send failed: both addr and via are nil.")
 				return
 			}
-			hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
+			hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
-			f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false)
+			f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
-			f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via2.relayHI.vpnIp).
+			f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via.relayHI.vpnIp).
 				WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
 				Info("Handshake message sent")
 			return
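Note: the `via` argument changes from an untyped `interface{}` that every call site had to assert (`via2 := via.(*ViaSender)`) to a plain `*ViaSender`, so only a nil check remains. A small self-contained sketch of the difference; the `ViaSender` here is a one-field stand-in, not the real struct:

```go
package main

import "fmt"

// Stand-in for the relay sender information carried alongside a packet.
type ViaSender struct{ relay string }

// Old shape: an untyped parameter that must be asserted at every use.
// A value of the wrong dynamic type only fails at runtime.
func sendOld(via interface{}) {
	v, ok := via.(*ViaSender)
	if !ok || v == nil {
		fmt.Println("both addr and via are nil")
		return
	}
	fmt.Println("relaying through", v.relay)
}

// New shape: the compiler enforces the type; only nil needs checking.
func sendNew(via *ViaSender) {
	if via == nil {
		fmt.Println("both addr and via are nil")
		return
	}
	fmt.Println("relaying through", via.relay)
}

func main() {
	sendOld(&ViaSender{relay: "10.0.0.1"})
	sendNew(nil)
}
```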
@@ -281,16 +264,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 				WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
 				Error("Failed to add HostInfo due to localIndex collision")
 			return
-		case ErrExistingHandshake:
-			// We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish
-			f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
-				WithField("certName", certName).
-				WithField("fingerprint", fingerprint).
-				WithField("issuer", issuer).
-				WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
-				WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-				Error("Prevented a pending handshake race")
-			return
 		default:
 			// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
 			// And we forget to update it here
@@ -324,41 +297,41 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 			WithField("issuer", issuer).
 			WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
 			WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
-			WithField("sentCachedPackets", len(hostinfo.packetStore)).
 			Info("Handshake message sent")
 		}
 	} else {
-		via2 := via.(*ViaSender)
+		if via == nil {
-		if via2 == nil {
 			f.l.Error("Handshake send failed: both addr and via are nil.")
 			return
 		}
-		hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
+		hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
-		f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false)
+		f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
-		f.l.WithField("vpnIp", vpnIp).WithField("relay", via2.relayHI.vpnIp).
+		f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
 			WithField("certName", certName).
 			WithField("fingerprint", fingerprint).
 			WithField("issuer", issuer).
 			WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
 			WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
-			WithField("sentCachedPackets", len(hostinfo.packetStore)).
 			Info("Handshake message sent")
 	}
 
-	hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
+	f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
+	hostinfo.ConnectionState.messageCounter.Store(2)
+	hostinfo.remotes.ResetBlockedRemotes()
 
 	return
 }
 
-func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *HostInfo, packet []byte, h *header.H) bool {
+func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
-	if hostinfo == nil {
+	if hh == nil {
 		// Nothing here to tear down, got a bogus stage 2 packet
 		return true
 	}
 
-	hostinfo.Lock()
+	hh.Lock()
-	defer hostinfo.Unlock()
+	defer hh.Unlock()
 
+	hostinfo := hh.hostinfo
 	if addr != nil {
 		if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
 			f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
@@ -367,22 +340,6 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 	}
 
 	ci := hostinfo.ConnectionState
-	if ci.ready {
-		f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
-			WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
-			Info("Handshake is already complete")
-
-		// Update remote if preferred
-		if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
-			// Send a test packet to ensure the other side has also switched to
-			// the preferred remote
-			f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
-		}
-
-		// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
-		return false
-	}
-
 	msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
 	if err != nil {
 		f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
@@ -413,7 +370,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 		return true
 	}
 
-	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
+	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
 	if err != nil {
 		f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
 			WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
@@ -436,34 +393,30 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 			Info("Incorrect host responded to handshake")
 
 		// Release our old handshake from pending, it should not continue
-		f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
+		f.handshakeManager.DeleteHostInfo(hostinfo)
 
 		// Create a new hostinfo/handshake for the intended vpn ip
-		//TODO: this adds it to the timer wheel in a way that aggressively retries
+		f.handshakeManager.StartHandshake(hostinfo.vpnIp, func(newHH *HandshakeHostInfo) {
-		newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
+			//TODO: this doesnt know if its being added or is being used for caching a packet
-		newHostInfo.Lock()
+			// Block the current used address
+			newHH.hostinfo.remotes = hostinfo.remotes
+			newHH.hostinfo.remotes.BlockRemote(addr)
 
-		// Block the current used address
+			// Get the correct remote list for the host we did handshake with
-		newHostInfo.remotes = hostinfo.remotes
+			hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
-		newHostInfo.remotes.BlockRemote(addr)
 
-		// Get the correct remote list for the host we did handshake with
+			f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
-		hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
+				WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
+				Info("Blocked addresses for handshakes")
 
-		f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
+			// Swap the packet store to benefit the original intended recipient
-			WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
+			newHH.packetStore = hh.packetStore
-			Info("Blocked addresses for handshakes")
+			hh.packetStore = []*cachedPacket{}
 
-		// Swap the packet store to benefit the original intended recipient
+			// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
-		hostinfo.ConnectionState.queueLock.Lock()
+			hostinfo.vpnIp = vpnIp
-		newHostInfo.packetStore = hostinfo.packetStore
+			f.sendCloseTunnel(hostinfo)
-		hostinfo.packetStore = []*cachedPacket{}
+		})
-		hostinfo.ConnectionState.queueLock.Unlock()
 
-		// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
-		hostinfo.vpnIp = vpnIp
-		f.sendCloseTunnel(hostinfo)
-		newHostInfo.Unlock()
 
 		return true
 	}
@@ -471,7 +424,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 	// Mark packet 2 as seen so it doesn't show up as missed
 	ci.window.Update(f.l, 2)
 
-	duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
+	duration := time.Since(hh.startTime).Nanoseconds()
 	f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
 		WithField("certName", certName).
 		WithField("fingerprint", fingerprint).
@@ -479,7 +432,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 		WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
 		WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
 		WithField("durationNs", duration).
-		WithField("sentCachedPackets", len(hostinfo.packetStore)).
+		WithField("sentCachedPackets", len(hh.packetStore)).
 		Info("Handshake message received")
 
 	hostinfo.remoteIndexId = hs.Details.ResponderIndex
@@ -494,17 +447,32 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 	if addr != nil {
 		hostinfo.SetRemote(addr)
 	} else {
-		via2 := via.(*ViaSender)
+		hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
-		hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
 	}
 
 	// Build up the radix for the firewall if we have subnets in the cert
 	hostinfo.CreateRemoteCIDR(remoteCert)
 
 	// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
-	//TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok?
 	f.handshakeManager.Complete(hostinfo, f)
-	hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
+	f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
 
+	hostinfo.ConnectionState.messageCounter.Store(2)
+
+	if f.l.Level >= logrus.DebugLevel {
+		hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
+	}
+
+	if len(hh.packetStore) > 0 {
+		nb := make([]byte, 12, 12)
+		out := make([]byte, mtu)
+		for _, cp := range hh.packetStore {
+			cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out)
+		}
+		f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
+	}
 
+	hostinfo.remotes.ResetBlockedRemotes()
 	f.metricHandshakes.Update(duration)
 
 	return false
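Note: another pattern visible across the ixHandshakeStage0/1/2 hunks is that certificate material is no longer read from fields like `f.caPool` or `ci.certState`; each handshake takes a snapshot via `f.pki.GetCertState()` / `f.pki.GetCAPool()`. A hedged, self-contained sketch of that accessor idea using an atomic pointer; the field layout below is an assumption for illustration, not Nebula's actual PKI type:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative stand-ins; Nebula's real CertState/CAPool types live elsewhere in the repo.
type CertState struct{ RawCertificateNoKey []byte }
type CAPool struct{ fingerprints []string }

// pki hands out immutable snapshots; a reload just swaps the pointers.
type pki struct {
	certState atomic.Pointer[CertState]
	caPool    atomic.Pointer[CAPool]
}

func (p *pki) GetCertState() *CertState { return p.certState.Load() }
func (p *pki) GetCAPool() *CAPool       { return p.caPool.Load() }

func (p *pki) reload(cs *CertState, ca *CAPool) {
	p.certState.Store(cs)
	p.caPool.Store(ca)
}

func main() {
	p := &pki{}
	p.reload(&CertState{RawCertificateNoKey: []byte("cert-v1")}, &CAPool{})

	// A handshake takes one snapshot and uses it consistently,
	// even if a reload happens midway through.
	certState := p.GetCertState()
	fmt.Println(string(certState.RawCertificateNoKey))
}
```

The point of the snapshot is that a certificate or CA reload can swap the pointers at any time without an in-flight handshake seeing a half-updated cert/CA pair.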
handshake_manager.go
@@ -7,6 +7,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"net"
+	"sync"
 	"time"
 
 	"github.com/rcrowley/go-metrics"
@@ -42,30 +43,74 @@ type HandshakeConfig struct {
 }
 
 type HandshakeManager struct {
-	pendingHostMap         *HostMap
+	// Mutex for interacting with the vpnIps and indexes maps
+	sync.RWMutex
+
+	vpnIps  map[iputil.VpnIp]*HandshakeHostInfo
+	indexes map[uint32]*HandshakeHostInfo
+
 	mainHostMap            *HostMap
 	lightHouse             *LightHouse
-	outside                *udp.Conn
+	outside                udp.Conn
 	config                 HandshakeConfig
-	OutboundHandshakeTimer *SystemTimerWheel
+	OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
 	messageMetrics         *MessageMetrics
 	metricInitiated        metrics.Counter
 	metricTimedOut         metrics.Counter
+	f                      *Interface
 	l                      *logrus.Logger
 
 	// can be used to trigger outbound handshake for the given vpnIp
 	trigger chan iputil.VpnIp
 }
 
-func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
+type HandshakeHostInfo struct {
+	sync.Mutex
+
+	startTime   time.Time       // Time that we first started trying with this handshake
+	ready       bool            // Is the handshake ready
+	counter     int             // How many attempts have we made so far
+	lastRemotes []*udp.Addr     // Remotes that we sent to during the previous attempt
+	packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes
+
+	hostinfo *HostInfo
+}
+
+func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
+	if len(hh.packetStore) < 100 {
+		tempPacket := make([]byte, len(packet))
+		copy(tempPacket, packet)
+
+		hh.packetStore = append(hh.packetStore, &cachedPacket{t, st, f, tempPacket})
+		if l.Level >= logrus.DebugLevel {
+			hh.hostinfo.logger(l).
+				WithField("length", len(hh.packetStore)).
+				WithField("stored", true).
+				Debugf("Packet store")
+		}
+
+	} else {
+		m.dropped.Inc(1)
+
+		if l.Level >= logrus.DebugLevel {
+			hh.hostinfo.logger(l).
+				WithField("length", len(hh.packetStore)).
+				WithField("stored", false).
+				Debugf("Packet store")
+		}
+	}
+}
+
+func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
 	return &HandshakeManager{
-		pendingHostMap:         NewHostMap(l, "pending", tunCidr, preferredRanges),
+		vpnIps:                 map[iputil.VpnIp]*HandshakeHostInfo{},
+		indexes:                map[uint32]*HandshakeHostInfo{},
 		mainHostMap:            mainHostMap,
 		lightHouse:             lightHouse,
 		outside:                outside,
 		config:                 config,
 		trigger:                make(chan iputil.VpnIp, config.triggerBuffer),
-		OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
+		OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
 		messageMetrics:         config.messageMetrics,
 		metricInitiated:        metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
 		metricTimedOut:         metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
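Note: `cachePacket` above caps the per-handshake packet store at 100 entries and counts everything beyond that as dropped, copying each packet because the caller's buffer is reused. A self-contained toy model of that bounded store; the names (`pendingHandshake`, etc.) are illustrative only, not Nebula types:

```go
package main

import "fmt"

// Toy model of HandshakeHostInfo's bounded packet store: keep at most 100
// packets for replay after the handshake completes, count the rest as dropped.
type pendingHandshake struct {
	packetStore [][]byte
	dropped     int
}

func (hh *pendingHandshake) cachePacket(packet []byte) {
	if len(hh.packetStore) < 100 {
		// Copy, because the caller's buffer will be reused for the next packet.
		tmp := make([]byte, len(packet))
		copy(tmp, packet)
		hh.packetStore = append(hh.packetStore, tmp)
		return
	}
	hh.dropped++
}

func main() {
	hh := &pendingHandshake{}
	buf := make([]byte, 1400)
	for i := 0; i < 150; i++ {
		hh.cachePacket(buf)
	}
	fmt.Println(len(hh.packetStore), "stored,", hh.dropped, "dropped") // 100 stored, 50 dropped
}
```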
@@ -73,7 +118,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
 	}
 }
 
-func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
+func (c *HandshakeManager) Run(ctx context.Context) {
 	clockSource := time.NewTicker(c.config.tryInterval)
 	defer clockSource.Stop()
 
@@ -82,92 +127,119 @@ func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
 		case <-ctx.Done():
 			return
 		case vpnIP := <-c.trigger:
-			c.handleOutbound(vpnIP, f, true)
+			c.handleOutbound(vpnIP, true)
 		case now := <-clockSource.C:
-			c.NextOutboundHandshakeTimerTick(now, f)
+			c.NextOutboundHandshakeTimerTick(now)
 		}
 	}
 }
 
-func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) {
+func (hm *HandshakeManager) HandleIncoming(addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
-	c.OutboundHandshakeTimer.advance(now)
+	// First remote allow list check before we know the vpnIp
+	if addr != nil {
+		if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
+			hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
+			return
+		}
+	}
+
+	switch h.Subtype {
+	case header.HandshakeIXPSK0:
+		switch h.MessageCounter {
+		case 1:
+			ixHandshakeStage1(hm.f, addr, via, packet, h)
+
+		case 2:
+			newHostinfo := hm.queryIndex(h.RemoteIndex)
+			tearDown := ixHandshakeStage2(hm.f, addr, via, newHostinfo, packet, h)
+			if tearDown && newHostinfo != nil {
+				hm.DeleteHostInfo(newHostinfo.hostinfo)
+			}
+		}
+	}
+}
+
+func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
+	c.OutboundHandshakeTimer.Advance(now)
 	for {
-		ep := c.OutboundHandshakeTimer.Purge()
+		vpnIp, has := c.OutboundHandshakeTimer.Purge()
-		if ep == nil {
+		if !has {
 			break
 		}
-		vpnIp := ep.(iputil.VpnIp)
+		c.handleOutbound(vpnIp, false)
-		c.handleOutbound(vpnIp, f, false)
 	}
 }
 
-func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) {
+func (hm *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, lighthouseTriggered bool) {
-	hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
+	hh := hm.queryVpnIp(vpnIp)
-	if err != nil {
+	if hh == nil {
 		return
 	}
-	hostinfo.Lock()
+	hh.Lock()
-	defer hostinfo.Unlock()
+	defer hh.Unlock()
 
-	// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
+	hostinfo := hh.hostinfo
-	if hostinfo.HandshakeComplete {
+	// If we are out of time, clean up
-		// Ensure we don't exist in the pending hostmap anymore since we have completed
+	if hh.counter >= hm.config.retries {
-		c.pendingHostMap.DeleteHostInfo(hostinfo)
+		hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.preferredRanges)).
+			WithField("initiatorIndex", hh.hostinfo.localIndexId).
+			WithField("remoteIndex", hh.hostinfo.remoteIndexId).
+			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
+			WithField("durationNs", time.Since(hh.startTime).Nanoseconds()).
+			Info("Handshake timed out")
+		hm.metricTimedOut.Inc(1)
+		hm.DeleteHostInfo(hostinfo)
 		return
 	}
 
+	// Increment the counter to increase our delay, linear backoff
+	hh.counter++
+
 	// Check if we have a handshake packet to transmit yet
-	if !hostinfo.HandshakeReady {
+	if !hh.ready {
-		// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
+		if !ixHandshakeStage0(hm.f, hh) {
-		// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
+			hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
+			return
-		return
+		}
-	}
 
-	// If we are out of time, clean up
-	if hostinfo.HandshakeCounter >= c.config.retries {
-		hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
-			WithField("initiatorIndex", hostinfo.localIndexId).
-			WithField("remoteIndex", hostinfo.remoteIndexId).
-			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-			WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
-			Info("Handshake timed out")
-		c.metricTimedOut.Inc(1)
-		c.pendingHostMap.DeleteHostInfo(hostinfo)
-		return
-	}
 
-	// We only care about a lighthouse trigger before the first handshake transmit attempt. This is a very specific
-	// optimization for a fast lighthouse reply
-	//TODO: it would feel better to do this once, anytime, as our delay increases over time
-	if lighthouseTriggered && hostinfo.HandshakeCounter > 0 {
-		// If we didn't return here a lighthouse could cause us to aggressively send handshakes
-		return
 	}
 
 	// Get a remotes object if we don't already have one.
 	// This is mainly to protect us as this should never be the case
-	// NB ^ This comment doesn't jive. It's how the thing gets intiailized.
+	// NB ^ This comment doesn't jive. It's how the thing gets initialized.
 	// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
 	if hostinfo.remotes == nil {
-		hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
+		hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
 	}
 
-	//TODO: this will generate a load of queries for hosts with only 1 ip (i'm not using a lighthouse, static mapped)
+	remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.preferredRanges)
-	if hostinfo.remotes.Len(c.pendingHostMap.preferredRanges) <= 1 {
+	remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hh.lastRemotes)
+
+	// We only care about a lighthouse trigger if we have new remotes to send to.
+	// This is a very specific optimization for a fast lighthouse reply.
+	if lighthouseTriggered && !remotesHaveChanged {
+		// If we didn't return here a lighthouse could cause us to aggressively send handshakes
+		return
+	}
+
+	hh.lastRemotes = remotes
+
+	// TODO: this will generate a load of queries for hosts with only 1 ip
+	// (such as ones registered to the lighthouse with only a private IP)
+	// So we only do it one time after attempting 5 handshakes already.
+	if len(remotes) <= 1 && hh.counter == 5 {
 		// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
 		// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
 		// the learned public ip for them. Query again to short circuit the promotion counter
-		c.lightHouse.QueryServer(vpnIp, f)
+		hm.lightHouse.QueryServer(vpnIp)
 	}
 
-	// Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
+	// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
 	var sentTo []*udp.Addr
-	hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
+	hostinfo.remotes.ForEach(hm.mainHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
-		c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
+		hm.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
-		err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
+		err := hm.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
 		if err != nil {
-			hostinfo.logger(c.l).WithField("udpAddr", addr).
+			hostinfo.logger(hm.l).WithField("udpAddr", addr).
 				WithField("initiatorIndex", hostinfo.localIndexId).
 				WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
 				WithError(err).Error("Failed to send handshake message")
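Note: `handleOutbound` now bumps `hh.counter` up front and re-arms the timer with `tryInterval * counter`, i.e. a linear backoff that runs until `retries` is exhausted. A self-contained sketch of the resulting schedule; the interval and retry values below are assumptions for illustration, not Nebula's configuration defaults:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	tryInterval := 100 * time.Millisecond // illustrative, see the handshake config
	retries := 10                         // illustrative

	elapsed := time.Duration(0)
	for counter := 1; counter <= retries; counter++ {
		// Mirrors OutboundHandshakeTimer.Add(vpnIp, tryInterval*time.Duration(counter))
		delay := tryInterval * time.Duration(counter)
		elapsed += delay
		fmt.Printf("attempt %2d: next retry in %v (total %v)\n", counter, delay, elapsed)
	}
}
```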
@@ -177,103 +249,179 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
 		}
 	})
 
-	// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout
+	// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
-	if len(sentTo) > 0 {
+	// so only log when the list of remotes has changed
-		hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
+	if remotesHaveChanged {
+		hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
 			WithField("initiatorIndex", hostinfo.localIndexId).
 			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
 			Info("Handshake message sent")
+	} else if hm.l.IsLevelEnabled(logrus.DebugLevel) {
+		hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
+			WithField("initiatorIndex", hostinfo.localIndexId).
+			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
+			Debug("Handshake message sent")
 	}
 
-	if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
+	if hm.config.useRelays && len(hostinfo.remotes.relays) > 0 {
-		hostinfo.logger(c.l).WithField("relayIps", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
+		hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
 		// Send a RelayRequest to all known Relay IP's
 		for _, relay := range hostinfo.remotes.relays {
 			// Don't relay to myself, and don't relay through the host I'm trying to connect to
-			if *relay == vpnIp || *relay == c.lightHouse.myVpnIp {
+			if *relay == vpnIp || *relay == hm.lightHouse.myVpnIp {
 				continue
 			}
-			relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
+			relayHostInfo := hm.mainHostMap.QueryVpnIp(*relay)
-			if err != nil || relayHostInfo.remote == nil {
+			if relayHostInfo == nil || relayHostInfo.remote == nil {
-				hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target.")
+				hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
-				f.Handshake(*relay)
+				hm.f.Handshake(*relay)
 				continue
 			}
 			// Check the relay HostInfo to see if we already established a relay through it
 			if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
 				switch existingRelay.State {
 				case Established:
-					hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
+					hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
-					f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
+					hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
 				case Requested:
-					hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
+					hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
 					// Re-send the CreateRelay request, in case the previous one was lost.
 					m := NebulaControl{
 						Type:                NebulaControl_CreateRelayRequest,
 						InitiatorRelayIndex: existingRelay.LocalIndex,
-						RelayFromIp:         uint32(c.lightHouse.myVpnIp),
+						RelayFromIp:         uint32(hm.lightHouse.myVpnIp),
 						RelayToIp:           uint32(vpnIp),
 					}
 					msg, err := m.Marshal()
 					if err != nil {
-						hostinfo.logger(c.l).
+						hostinfo.logger(hm.l).
 							WithError(err).
 							Error("Failed to marshal Control message to create relay")
 					} else {
-						f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu))
+						// This must send over the hostinfo, not over hm.Hosts[ip]
+						hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+						hm.l.WithFields(logrus.Fields{
+							"relayFrom":           hm.lightHouse.myVpnIp,
+							"relayTo":             vpnIp,
+							"initiatorRelayIndex": existingRelay.LocalIndex,
+							"relay":               *relay}).
+							Info("send CreateRelayRequest")
 					}
 				default:
-					hostinfo.logger(c.l).
+					hostinfo.logger(hm.l).
 						WithField("vpnIp", vpnIp).
 						WithField("state", existingRelay.State).
-						WithField("relayVpnIp", relayHostInfo.vpnIp).
+						WithField("relay", relayHostInfo.vpnIp).
 						Errorf("Relay unexpected state")
 				}
 			} else {
 				// No relays exist or requested yet.
 				if relayHostInfo.remote != nil {
-					idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested)
+					idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
 					if err != nil {
-						hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
+						hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
 					}
 
 					m := NebulaControl{
 						Type:                NebulaControl_CreateRelayRequest,
 						InitiatorRelayIndex: idx,
-						RelayFromIp:         uint32(c.lightHouse.myVpnIp),
+						RelayFromIp:         uint32(hm.lightHouse.myVpnIp),
 						RelayToIp:           uint32(vpnIp),
 					}
 					msg, err := m.Marshal()
 					if err != nil {
-						hostinfo.logger(c.l).
+						hostinfo.logger(hm.l).
 							WithError(err).
 							Error("Failed to marshal Control message to create relay")
 					} else {
-						f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu))
+						hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+						hm.l.WithFields(logrus.Fields{
+							"relayFrom":           hm.lightHouse.myVpnIp,
+							"relayTo":             vpnIp,
+							"initiatorRelayIndex": idx,
+							"relay":               *relay}).
+							Info("send CreateRelayRequest")
 					}
 				}
 			}
 		}
 	}
 
-	// Increment the counter to increase our delay, linear backoff
-	hostinfo.HandshakeCounter++
 
 	// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
 	if !lighthouseTriggered {
-		//TODO: feel like we dupe handshake real fast in a tight loop, why?
+		hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
 	}
 }
 
-func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo {
+// GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
-	hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
+// The 2nd argument will be true if the hostinfo is ready to transmit traffic
+func (hm *HandshakeManager) GetOrHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
-	if created {
+	// Check the main hostmap and maintain a read lock if our host is not there
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
+	hm.mainHostMap.RLock()
-		c.metricInitiated.Inc(1)
+	if h, ok := hm.mainHostMap.Hosts[vpnIp]; ok {
+		hm.mainHostMap.RUnlock()
+		// Do not attempt promotion if you are a lighthouse
+		if !hm.lightHouse.amLighthouse {
+			h.TryPromoteBest(hm.mainHostMap.preferredRanges, hm.f)
+		}
+		return h, true
 	}
 
+	defer hm.mainHostMap.RUnlock()
+	return hm.StartHandshake(vpnIp, cacheCb), false
+}
+
+// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
+func (hm *HandshakeManager) StartHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) *HostInfo {
+	hm.Lock()
+	defer hm.Unlock()
+
+	if hh, ok := hm.vpnIps[vpnIp]; ok {
+		// We are already trying to handshake with this vpn ip
+		if cacheCb != nil {
+			cacheCb(hh)
+		}
+		return hh.hostinfo
+	}
+
+	hostinfo := &HostInfo{
+		vpnIp:           vpnIp,
+		HandshakePacket: make(map[uint8][]byte, 0),
+		relayState: RelayState{
+			relays:        map[iputil.VpnIp]struct{}{},
+			relayForByIp:  map[iputil.VpnIp]*Relay{},
+			relayForByIdx: map[uint32]*Relay{},
+		},
+	}
+
+	hh := &HandshakeHostInfo{
+		hostinfo:  hostinfo,
+		startTime: time.Now(),
+	}
+	hm.vpnIps[vpnIp] = hh
+	hm.metricInitiated.Inc(1)
+	hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)
+
+	if cacheCb != nil {
+		cacheCb(hh)
+	}
+
+	// If this is a static host, we don't need to wait for the HostQueryReply
+	// We can trigger the handshake right now
+	_, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
+	if !doTrigger {
+		// Add any calculated remotes, and trigger early handshake if one found
+		doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
+	}
+
+	if doTrigger {
+		select {
+		case hm.trigger <- vpnIp:
+		default:
+		}
+	}
+
+	hm.lightHouse.QueryServer(vpnIp)
 	return hostinfo
 }
 
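Note: `GetOrHandshake` collapses the old AddVpnIp/pending-hostmap flow into a single call: it returns an established HostInfo when a tunnel already exists, and otherwise starts or joins a pending handshake, exposing it to the caller through the optional `cacheCb` so packets can be queued against it. A self-contained toy of that shape; every name below is a stand-in, not a Nebula type:

```go
package main

import "fmt"

// Toy model of the GetOrHandshake shape: return an established entry if we
// have one, otherwise start (or join) a pending attempt and hand the caller
// the pending record via a callback so it can queue work against it.
type pending struct{ queued []string }

type manager struct {
	established map[string]string   // vpnIp -> tunnel
	pendings    map[string]*pending // vpnIp -> pending handshake
}

func (m *manager) GetOrHandshake(vpnIp string, cacheCb func(*pending)) (string, bool) {
	if t, ok := m.established[vpnIp]; ok {
		return t, true
	}
	p, ok := m.pendings[vpnIp]
	if !ok {
		p = &pending{}
		m.pendings[vpnIp] = p // first caller starts the handshake
	}
	if cacheCb != nil {
		cacheCb(p) // every caller may cache a packet against the same attempt
	}
	return "", false
}

func main() {
	m := &manager{established: map[string]string{}, pendings: map[string]*pending{}}
	_, ready := m.GetOrHandshake("10.1.0.5", func(p *pending) { p.queued = append(p.queued, "pkt1") })
	fmt.Println(ready, len(m.pendings["10.1.0.5"].queued)) // false 1
}
```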
@@ -281,7 +429,6 @@ var (
 	ErrExistingHostInfo    = errors.New("existing hostinfo")
 	ErrAlreadySeen         = errors.New("already seen")
 	ErrLocalIndexCollision = errors.New("local index collision")
-	ErrExistingHandshake   = errors.New("existing handshake")
 )
 
 // CheckAndComplete checks for any conflicts in the main and pending hostmap
@@ -295,22 +442,27 @@ var (
 //
 // ErrLocalIndexCollision if we already have an entry in the main or pending
 // hostmap for the hostinfo.localIndexId.
-func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) {
+func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
-	c.pendingHostMap.Lock()
-	defer c.pendingHostMap.Unlock()
 	c.mainHostMap.Lock()
 	defer c.mainHostMap.Unlock()
+	c.Lock()
+	defer c.Unlock()
+
 	// Check if we already have a tunnel with this vpn ip
 	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
 	if found && existingHostInfo != nil {
-		// Is it just a delayed handshake packet?
+		testHostInfo := existingHostInfo
-		if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
+		for testHostInfo != nil {
-			return existingHostInfo, ErrAlreadySeen
+			// Is it just a delayed handshake packet?
+			if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], testHostInfo.HandshakePacket[handshakePacket]) {
+				return testHostInfo, ErrAlreadySeen
+			}
+
+			testHostInfo = testHostInfo.next
 		}
 
 		// Is this a newer handshake?
-		if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime {
+		if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime && !existingHostInfo.ConnectionState.initiator {
 			return existingHostInfo, ErrExistingHostInfo
 		}
 
@@ -323,8 +475,8 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 		return existingIndex, ErrLocalIndexCollision
 	}
 
-	existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
-	if found && existingIndex != hostinfo {
+	existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
+	if found && existingPendingIndex.hostinfo != hostinfo {
 		// We have a collision, but for a different hostinfo
 		return existingIndex, ErrLocalIndexCollision
 	}
@@ -338,90 +490,54 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 			Info("New host shadows existing host remoteIndex")
 	}
 
-	// Check if we are also handshaking with this vpn ip
-	pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
-	if found && pendingHostInfo != nil {
-		if !overwrite {
-			// We won, let our pending handshake win
-			return pendingHostInfo, ErrExistingHandshake
-		}
-
-		// We lost, take this handshake and move any cached packets over so they get sent
-		pendingHostInfo.ConnectionState.queueLock.Lock()
-		hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
-		c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
-		pendingHostInfo.ConnectionState.queueLock.Unlock()
-		pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
-	}
-
-	if existingHostInfo != nil {
-		// We are going to overwrite this entry, so remove the old references
-		delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
-		delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
-		delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
-		for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
-			delete(c.mainHostMap.Relays, relayIdx)
-		}
-	}
-
-	c.mainHostMap.addHostInfo(hostinfo, f)
+	c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
 	return existingHostInfo, nil
 }
 
 // Complete is a simpler version of CheckAndComplete when we already know we
 // won't have a localIndexId collision because we already have an entry in the
-// pendingHostMap
-func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
-	c.pendingHostMap.Lock()
-	defer c.pendingHostMap.Unlock()
-	c.mainHostMap.Lock()
-	defer c.mainHostMap.Unlock()
+// pendingHostMap. An existing hostinfo is returned if there was one.
+func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
+	hm.mainHostMap.Lock()
+	defer hm.mainHostMap.Unlock()
+	hm.Lock()
+	defer hm.Unlock()
 
-	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
-	if found && existingHostInfo != nil {
-		// We are going to overwrite this entry, so remove the old references
-		delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
-		delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
-		delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
-		for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
-			delete(c.mainHostMap.Relays, relayIdx)
-		}
-	}
-
-	existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
+	existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
 	if found && existingRemoteIndex != nil {
 		// We have a collision, but this can happen since we can't control
 		// the remote ID. Just log about the situation as a note.
-		hostinfo.logger(c.l).
+		hostinfo.logger(hm.l).
 			WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
 			Info("New host shadows existing host remoteIndex")
 	}
 
-	c.mainHostMap.addHostInfo(hostinfo, f)
-	c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
+	// We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap.
+	hm.unlockedDeleteHostInfo(hostinfo)
+	hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
 }
 
-// AddIndexHostInfo generates a unique localIndexId for this HostInfo
+// allocateIndex generates a unique localIndexId for this HostInfo
 // and adds it to the pendingHostMap. Will error if we are unable to generate
 // a unique localIndexId
-func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
-	c.pendingHostMap.Lock()
-	defer c.pendingHostMap.Unlock()
-	c.mainHostMap.RLock()
-	defer c.mainHostMap.RUnlock()
+func (hm *HandshakeManager) allocateIndex(hh *HandshakeHostInfo) error {
+	hm.mainHostMap.RLock()
+	defer hm.mainHostMap.RUnlock()
+	hm.Lock()
+	defer hm.Unlock()
 
 	for i := 0; i < 32; i++ {
-		index, err := generateIndex(c.l)
+		index, err := generateIndex(hm.l)
 		if err != nil {
 			return err
 		}
 
-		_, inPending := c.pendingHostMap.Indexes[index]
-		_, inMain := c.mainHostMap.Indexes[index]
+		_, inPending := hm.indexes[index]
+		_, inMain := hm.mainHostMap.Indexes[index]
 
 		if !inMain && !inPending {
-			h.localIndexId = index
-			c.pendingHostMap.Indexes[index] = h
+			hh.hostinfo.localIndexId = index
+			hm.indexes[index] = hh
 			return nil
 		}
 	}
@@ -429,22 +545,90 @@ func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
 	return errors.New("failed to generate unique localIndexId")
 }
 
-func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
-	c.pendingHostMap.addRemoteIndexHostInfo(index, h)
-}
-
 func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
-	//l.Debugln("Deleting pending hostinfo :", hostinfo)
-	c.pendingHostMap.DeleteHostInfo(hostinfo)
+	c.Lock()
+	defer c.Unlock()
+	c.unlockedDeleteHostInfo(hostinfo)
 }
 
-func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
-	return c.pendingHostMap.QueryIndex(index)
+func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
+	delete(c.vpnIps, hostinfo.vpnIp)
+	if len(c.vpnIps) == 0 {
+		c.vpnIps = map[iputil.VpnIp]*HandshakeHostInfo{}
+	}
+
+	delete(c.indexes, hostinfo.localIndexId)
+	if len(c.vpnIps) == 0 {
+		c.indexes = map[uint32]*HandshakeHostInfo{}
+	}
+
+	if c.l.Level >= logrus.DebugLevel {
+		c.l.WithField("hostMap", m{"mapTotalSize": len(c.vpnIps),
+			"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
+			Debug("Pending hostmap hostInfo deleted")
+	}
+}
+
+func (hm *HandshakeManager) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
+	hh := hm.queryVpnIp(vpnIp)
+	if hh != nil {
+		return hh.hostinfo
+	}
+	return nil
+
+}
+
+func (hm *HandshakeManager) queryVpnIp(vpnIp iputil.VpnIp) *HandshakeHostInfo {
+	hm.RLock()
+	defer hm.RUnlock()
+	return hm.vpnIps[vpnIp]
+}
+
+func (hm *HandshakeManager) QueryIndex(index uint32) *HostInfo {
+	hh := hm.queryIndex(index)
+	if hh != nil {
+		return hh.hostinfo
+	}
+	return nil
+}
+
+func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
+	hm.RLock()
+	defer hm.RUnlock()
+	return hm.indexes[index]
+}
+
+func (c *HandshakeManager) GetPreferredRanges() []*net.IPNet {
+	return c.mainHostMap.preferredRanges
+}
+
+func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
+	c.RLock()
+	defer c.RUnlock()
+
+	for _, v := range c.vpnIps {
+		f(v.hostinfo)
+	}
+}
+
+func (c *HandshakeManager) ForEachIndex(f controlEach) {
+	c.RLock()
+	defer c.RUnlock()
+
+	for _, v := range c.indexes {
+		f(v.hostinfo)
+	}
 }
 
 func (c *HandshakeManager) EmitStats() {
-	c.pendingHostMap.EmitStats("pending")
-	c.mainHostMap.EmitStats("main")
+	c.RLock()
+	hostLen := len(c.vpnIps)
+	indexLen := len(c.indexes)
+	c.RUnlock()
+
+	metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(int64(hostLen))
+	metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(int64(indexLen))
+	c.mainHostMap.EmitStats()
 }
 
 // Utility functions below
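The DeleteHostInfo / unlockedDeleteHostInfo split above follows a common Go locking convention: the exported method acquires the mutex and the lower-case "unlocked" helper assumes the lock is already held, so callers that already hold it (such as Complete) can reuse the same logic. A small self-contained sketch with a hypothetical registry type, not nebula code:

package main

import "sync"

// registry is a stand-in for the manager types above; illustrative only.
type registry struct {
	sync.RWMutex
	items map[uint32]string
}

// Delete is the exported entry point and owns the locking.
func (r *registry) Delete(id uint32) {
	r.Lock()
	defer r.Unlock()
	r.unlockedDelete(id)
}

// unlockedDelete assumes the caller already holds the write lock.
func (r *registry) unlockedDelete(id uint32) {
	delete(r.items, id)
}

func main() {
	r := &registry{items: map[uint32]string{1: "a"}}
	r.Delete(1)
}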
@@ -5,6 +5,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/slackhq/nebula/cert"
 	"github.com/slackhq/nebula/header"
 	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/test"
@@ -14,109 +15,57 @@ import (
 
 func Test_NewHandshakeManagerVpnIp(t *testing.T) {
 	l := test.NewLogger()
-	_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
 	_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
 	_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
 	ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
 	preferredRanges := []*net.IPNet{localrange}
-	mw := &mockEncWriter{}
-	mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
-	lh := &LightHouse{
-		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
-		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
-		addrMap:           make(map[iputil.VpnIp]*RemoteList),
+	mainHM := NewHostMap(l, vpncidr, preferredRanges)
+	lh := newTestLighthouse()
+
+	cs := &CertState{
+		RawCertificate:      []byte{},
+		PrivateKey:          []byte{},
+		Certificate:         &cert.NebulaCertificate{},
+		RawCertificateNoKey: []byte{},
 	}
 
-	blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
+	blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
+	blah.f = &Interface{handshakeManager: blah, pki: &PKI{}, l: l}
+	blah.f.pki.cs.Store(cs)
 
 	now := time.Now()
-	blah.NextOutboundHandshakeTimerTick(now, mw)
+	blah.NextOutboundHandshakeTimerTick(now)
 
-	var initCalled bool
-	initFunc := func(*HostInfo) {
-		initCalled = true
-	}
-
-	i := blah.AddVpnIp(ip, initFunc)
-	assert.True(t, initCalled)
-
-	initCalled = false
-	i2 := blah.AddVpnIp(ip, initFunc)
-	assert.False(t, initCalled)
+	i := blah.StartHandshake(ip, nil)
+	i2 := blah.StartHandshake(ip, nil)
 	assert.Same(t, i, i2)
 
-	i.remotes = NewRemoteList()
-	i.HandshakeReady = true
+	i.remotes = NewRemoteList(nil)
 
 	// Adding something to pending should not affect the main hostmap
 	assert.Len(t, mainHM.Hosts, 0)
 
 	// Confirm they are in the pending index list
-	assert.Contains(t, blah.pendingHostMap.Hosts, ip)
+	assert.Contains(t, blah.vpnIps, ip)
 
 	// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
 	for i := 1; i <= DefaultHandshakeRetries+1; i++ {
 		now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
-		blah.NextOutboundHandshakeTimerTick(now, mw)
+		blah.NextOutboundHandshakeTimerTick(now)
 	}
 
 	// Confirm they are still in the pending index list
-	assert.Contains(t, blah.pendingHostMap.Hosts, ip)
+	assert.Contains(t, blah.vpnIps, ip)
 
 	// Tick 1 more time, a minute will certainly flush it out
-	blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
+	blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute))
 
 	// Confirm they have been removed
-	assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
+	assert.NotContains(t, blah.vpnIps, ip)
 }
 
-func Test_NewHandshakeManagerTrigger(t *testing.T) {
-	l := test.NewLogger()
-	_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
-	_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-	_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-	ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
-	preferredRanges := []*net.IPNet{localrange}
-	mw := &mockEncWriter{}
-	mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
-	lh := &LightHouse{
-		addrMap:           make(map[iputil.VpnIp]*RemoteList),
-		l:                 l,
-		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
-		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
-	}
-
-	blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
-
-	now := time.Now()
-	blah.NextOutboundHandshakeTimerTick(now, mw)
-
-	assert.Equal(t, 0, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
-
-	hi := blah.AddVpnIp(ip, nil)
-	hi.HandshakeReady = true
-	assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
-	assert.Equal(t, 0, hi.HandshakeCounter, "Should not have attempted a handshake yet")
-
-	// Trigger the same method the channel will but, this should set our remotes pointer
-	blah.handleOutbound(ip, mw, true)
-	assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have done a handshake attempt")
-	assert.NotNil(t, hi.remotes, "Manager should have set my remotes pointer")
-
-	// Make sure the trigger doesn't double schedule the timer entry
-	assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
-
-	uaddr := udp.NewAddrFromString("10.1.1.1:4242")
-	hi.remotes.unlockedPrependV4(ip, NewIp4AndPort(uaddr.IP, uint32(uaddr.Port)))
-
-	// We now have remotes but only the first trigger should have pushed things forward
-	blah.handleOutbound(ip, mw, true)
-	assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have not done a handshake attempt")
-	assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
-}
-
-func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) {
-	for _, i := range tw.wheel {
+func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
+	for _, i := range tw.t.wheel {
 		n := i.Head
 		for n != nil {
 			c++
@@ -133,7 +82,11 @@ func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.Mess
 	return
 }
 
-func (mw *mockEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) {
+func (mw *mockEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
+	return
+}
+
+func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
 	return
 }

585 hostmap.go
@@ -1,9 +1,7 @@
 package nebula
 
 import (
-	"context"
 	"errors"
-	"fmt"
 	"net"
 	"sync"
 	"sync/atomic"
@@ -18,10 +16,16 @@ import (
 	"github.com/slackhq/nebula/udp"
 )
 
-//const ProbeLen = 100
-const PromoteEvery = 1000
-const ReQueryEvery = 5000
+// const ProbeLen = 100
+const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
+const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
+const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
 const MaxRemotes = 10
+const maxRecvError = 4
+
+// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
+// 5 allows for an initial handshake and each host pair re-handshaking twice
+const MaxHostInfosPerVpnIp = 5
 
 // How long we should prevent roaming back to the previous IP.
 // This helps prevent flapping due to packets already in flight
@@ -29,6 +33,7 @@ const RoamingSuppressSeconds = 2
 
 const (
 	Requested = iota
+	PeerRequested
 	Established
 )

@@ -48,7 +53,6 @@ type Relay struct {
 
 type HostMap struct {
 	sync.RWMutex //Because we concurrently read and write to our maps
-	name          string
 	Indexes       map[uint32]*HostInfo
 	Relays        map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
 	RemoteIndexes map[uint32]*HostInfo
@@ -59,6 +63,9 @@ type HostMap struct {
 	l       *logrus.Logger
 }
 
+// For synchronization, treat the pointed-to Relay struct as immutable. To edit the Relay
+// struct, make a copy of an existing value, edit the fileds in the copy, and
+// then store a pointer to the new copy in both realyForBy* maps.
 type RelayState struct {
 	sync.RWMutex

@@ -73,6 +80,16 @@ func (rs *RelayState) DeleteRelay(ip iputil.VpnIp) {
 	delete(rs.relays, ip)
 }
 
+func (rs *RelayState) CopyAllRelayFor() []*Relay {
+	rs.RLock()
+	defer rs.RUnlock()
+	ret := make([]*Relay, 0, len(rs.relayForByIdx))
+	for _, r := range rs.relayForByIdx {
+		ret = append(ret, r)
+	}
+	return ret
+}
+
 func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) {
 	rs.RLock()
 	defer rs.RUnlock()
@@ -119,13 +136,43 @@ func (rs *RelayState) CopyRelayForIdxs() []uint32 {
 func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) {
 	rs.Lock()
 	defer rs.Unlock()
-	relay, ok := rs.relayForByIdx[localIdx]
+	r, ok := rs.relayForByIdx[localIdx]
 	if !ok {
 		return iputil.VpnIp(0), false
 	}
 	delete(rs.relayForByIdx, localIdx)
-	delete(rs.relayForByIp, relay.PeerIp)
-	return relay.PeerIp, true
+	delete(rs.relayForByIp, r.PeerIp)
+	return r.PeerIp, true
+}
+
+func (rs *RelayState) CompleteRelayByIP(vpnIp iputil.VpnIp, remoteIdx uint32) bool {
+	rs.Lock()
+	defer rs.Unlock()
+	r, ok := rs.relayForByIp[vpnIp]
+	if !ok {
+		return false
+	}
+	newRelay := *r
+	newRelay.State = Established
+	newRelay.RemoteIndex = remoteIdx
+	rs.relayForByIdx[r.LocalIndex] = &newRelay
+	rs.relayForByIp[r.PeerIp] = &newRelay
+	return true
+}
+
+func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Relay, bool) {
+	rs.Lock()
+	defer rs.Unlock()
+	r, ok := rs.relayForByIdx[localIdx]
+	if !ok {
+		return nil, false
+	}
+	newRelay := *r
+	newRelay.State = Established
+	newRelay.RemoteIndex = remoteIdx
+	rs.relayForByIdx[r.LocalIndex] = &newRelay
+	rs.relayForByIp[r.PeerIp] = &newRelay
+	return &newRelay, true
 }
 
 func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) {
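CompleteRelayByIP and CompleteRelayByIdx above follow the copy-on-write rule from the RelayState comment: a published *Relay is never mutated in place; the writer copies the struct, edits the copy, and republishes the pointer. A standalone sketch of the same idea, using a hypothetical relayEntry type rather than nebula's Relay:

// relayEntry stands in for the Relay struct; illustrative only.
type relayEntry struct {
	State       int
	RemoteIndex uint32
}

// completeEntry publishes an updated copy rather than mutating the existing value,
// so readers holding the old pointer keep seeing a consistent snapshot.
func completeEntry(m map[uint32]*relayEntry, idx uint32, remoteIdx uint32, established int) (*relayEntry, bool) {
	old, ok := m[idx]
	if !ok {
		return nil, false
	}
	updated := *old
	updated.State = established
	updated.RemoteIndex = remoteIdx
	m[idx] = &updated
	return &updated, true
}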
@@ -141,6 +188,7 @@ func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
 	r, ok := rs.relayForByIdx[idx]
 	return r, ok
 }
+
 func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
 	rs.Lock()
 	defer rs.Unlock()
@@ -149,24 +197,24 @@ func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
 }
 
 type HostInfo struct {
-	sync.RWMutex
-	remote            *udp.Addr
-	remotes           *RemoteList
-	promoteCounter    uint32
-	ConnectionState   *ConnectionState
-	handshakeStart    time.Time        //todo: this an entry in the handshake manager
-	HandshakeReady    bool             //todo: being in the manager means you are ready
-	HandshakeCounter  int              //todo: another handshake manager entry
-	HandshakeComplete bool             //todo: this should go away in favor of ConnectionState.ready
-	HandshakePacket   map[uint8][]byte //todo: this is other handshake manager entry
-	packetStore       []*cachedPacket  //todo: this is other handshake manager entry
-	remoteIndexId     uint32
-	localIndexId      uint32
-	vpnIp             iputil.VpnIp
-	recvError         int
-	remoteCidr        *cidr.Tree4
-	relayState        RelayState
+	remote          *udp.Addr
+	remotes         *RemoteList
+	promoteCounter  atomic.Uint32
+	ConnectionState *ConnectionState
+	remoteIndexId   uint32
+	localIndexId    uint32
+	vpnIp           iputil.VpnIp
+	recvError       atomic.Uint32
+	remoteCidr      *cidr.Tree4[struct{}]
+	relayState      RelayState
+
+	// HandshakePacket records the packets used to create this hostinfo
+	// We need these to avoid replayed handshake packets creating new hostinfos which causes churn
+	HandshakePacket map[uint8][]byte
+
+	// nextLHQuery is the earliest we can ask the lighthouse for new information.
+	// This is used to limit lighthouse re-queries in chatty clients
+	nextLHQuery atomic.Int64
 
 	// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
 	// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
@@ -180,6 +228,10 @@ type HostInfo struct {
 
 	lastRoam       time.Time
 	lastRoamRemote *udp.Addr
+
+	// Used to track other hostinfos for this vpn ip since only 1 can be primary
+	// Synchronised via hostmap lock and not the hostinfo lock.
+	next, prev *HostInfo
 }
 
 type ViaSender struct {
@@ -202,13 +254,12 @@ type cachedPacketMetrics struct {
 	dropped metrics.Counter
 }
 
-func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
+func NewHostMap(l *logrus.Logger, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
 	h := map[iputil.VpnIp]*HostInfo{}
 	i := map[uint32]*HostInfo{}
 	r := map[uint32]*HostInfo{}
 	relays := map[uint32]*HostInfo{}
 	m := HostMap{
-		name:          name,
 		Indexes:       i,
 		Relays:        relays,
 		RemoteIndexes: r,
@@ -220,8 +271,8 @@ func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRang
 	return &m
 }
 
-// UpdateStats takes a name and reports host and index counts to the stats collection system
-func (hm *HostMap) EmitStats(name string) {
+// EmitStats reports host, index, and relay counts to the stats collection system
+func (hm *HostMap) EmitStats() {
 	hm.RLock()
 	hostLen := len(hm.Hosts)
 	indexLen := len(hm.Indexes)
@@ -229,283 +280,178 @@ func (hm *HostMap) EmitStats(name string) {
 	relaysLen := len(hm.Relays)
 	hm.RUnlock()
 
-	metrics.GetOrRegisterGauge("hostmap."+name+".hosts", nil).Update(int64(hostLen))
-	metrics.GetOrRegisterGauge("hostmap."+name+".indexes", nil).Update(int64(indexLen))
-	metrics.GetOrRegisterGauge("hostmap."+name+".remoteIndexes", nil).Update(int64(remoteIndexLen))
-	metrics.GetOrRegisterGauge("hostmap."+name+".relayIndexes", nil).Update(int64(relaysLen))
+	metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
+	metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
+	metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
+	metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
 }
 
 func (hm *HostMap) RemoveRelay(localIdx uint32) {
 	hm.Lock()
-	hiRelay, ok := hm.Relays[localIdx]
+	_, ok := hm.Relays[localIdx]
 	if !ok {
 		hm.Unlock()
 		return
 	}
 	delete(hm.Relays, localIdx)
 	hm.Unlock()
-	ip, ok := hiRelay.relayState.RemoveRelay(localIdx)
-	if !ok {
-		return
-	}
-	hiPeer, err := hm.QueryVpnIp(ip)
-	if err != nil {
-		return
-	}
-	var otherPeerIdx uint32
-	hiPeer.relayState.DeleteRelay(hiRelay.vpnIp)
-	relay, ok := hiPeer.relayState.GetRelayForByIp(hiRelay.vpnIp)
-	if ok {
-		otherPeerIdx = relay.LocalIndex
-	}
-	// I am a relaying host. I need to remove the other relay, too.
-	hm.RemoveRelay(otherPeerIdx)
 }
 
-func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
-	hm.RLock()
-	if i, ok := hm.Hosts[vpnIp]; ok {
-		index := i.localIndexId
-		hm.RUnlock()
-		return index, nil
-	}
-	hm.RUnlock()
-	return 0, errors.New("vpn IP not found")
-}
-
-func (hm *HostMap) Add(ip iputil.VpnIp, hostinfo *HostInfo) {
-	hm.Lock()
-	hm.Hosts[ip] = hostinfo
-	hm.Unlock()
-}
-
-func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (hostinfo *HostInfo, created bool) {
-	hm.RLock()
-	if h, ok := hm.Hosts[vpnIp]; !ok {
-		hm.RUnlock()
-		h = &HostInfo{
-			promoteCounter:  0,
-			vpnIp:           vpnIp,
-			HandshakePacket: make(map[uint8][]byte, 0),
-			relayState: RelayState{
-				relays:        map[iputil.VpnIp]struct{}{},
-				relayForByIp:  map[iputil.VpnIp]*Relay{},
-				relayForByIdx: map[uint32]*Relay{},
-			},
-		}
-		if init != nil {
-			init(h)
-		}
-		hm.Lock()
-		hm.Hosts[vpnIp] = h
-		hm.Unlock()
-		return h, true
-	} else {
-		hm.RUnlock()
-		return h, false
-	}
-}
-
-func (hm *HostMap) DeleteVpnIp(vpnIp iputil.VpnIp) {
-	hm.Lock()
-	delete(hm.Hosts, vpnIp)
-	if len(hm.Hosts) == 0 {
-		hm.Hosts = map[iputil.VpnIp]*HostInfo{}
-	}
-	hm.Unlock()
-
-	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": vpnIp, "mapTotalSize": len(hm.Hosts)}).
-			Debug("Hostmap vpnIp deleted")
-	}
-}
-
-// Only used by pendingHostMap when the remote index is not initially known
-func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
-	hm.Lock()
-	h.remoteIndexId = index
-	hm.RemoteIndexes[index] = h
-	hm.Unlock()
-
-	if hm.l.Level > logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes),
-			"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "hostId": h.vpnIp}}).
-			Debug("Hostmap remoteIndex added")
-	}
-}
-
-func (hm *HostMap) AddVpnIpHostInfo(vpnIp iputil.VpnIp, h *HostInfo) {
-	hm.Lock()
-	h.vpnIp = vpnIp
-	hm.Hosts[vpnIp] = h
-	hm.Indexes[h.localIndexId] = h
-	hm.RemoteIndexes[h.remoteIndexId] = h
-	hm.Unlock()
-
-	if hm.l.Level > logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": vpnIp, "mapTotalSize": len(hm.Hosts),
-			"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "vpnIp": h.vpnIp}}).
-			Debug("Hostmap vpnIp added")
-	}
-}
-
-// This is only called in pendingHostmap, to cleanup an inbound handshake
-func (hm *HostMap) DeleteIndex(index uint32) {
-	hm.Lock()
-	hostinfo, ok := hm.Indexes[index]
-	if ok {
-		delete(hm.Indexes, index)
-		delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
-
-		// Check if we have an entry under hostId that matches the same hostinfo
-		// instance. Clean it up as well if we do.
-		hostinfo2, ok := hm.Hosts[hostinfo.vpnIp]
-		if ok && hostinfo2 == hostinfo {
-			delete(hm.Hosts, hostinfo.vpnIp)
-		}
-	}
-	hm.Unlock()
-
-	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
-			Debug("Hostmap index deleted")
-	}
-}
-
-// This is used to cleanup on recv_error
-func (hm *HostMap) DeleteReverseIndex(index uint32) {
-	hm.Lock()
-	hostinfo, ok := hm.RemoteIndexes[index]
-	if ok {
-		delete(hm.Indexes, hostinfo.localIndexId)
-		delete(hm.RemoteIndexes, index)
-
-		// Check if we have an entry under hostId that matches the same hostinfo
-		// instance. Clean it up as well if we do (they might not match in pendingHostmap)
-		var hostinfo2 *HostInfo
-		hostinfo2, ok = hm.Hosts[hostinfo.vpnIp]
-		if ok && hostinfo2 == hostinfo {
-			delete(hm.Hosts, hostinfo.vpnIp)
-		}
-	}
-	hm.Unlock()
-
-	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
-			Debug("Hostmap remote index deleted")
-	}
-}
-
-func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) {
+// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
+func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
 	// Delete the host itself, ensuring it's not modified anymore
 	hm.Lock()
+	// If we have a previous or next hostinfo then we are not the last one for this vpn ip
+	final := (hostinfo.next == nil && hostinfo.prev == nil)
 	hm.unlockedDeleteHostInfo(hostinfo)
 	hm.Unlock()
 
-	// And tear down all the relays going through this host
-	for _, localIdx := range hostinfo.relayState.CopyRelayForIdxs() {
-		hm.RemoveRelay(localIdx)
-	}
-
-	// And tear down the relays this deleted hostInfo was using to be reached
-	teardownRelayIdx := []uint32{}
-	for _, relayIp := range hostinfo.relayState.CopyRelayIps() {
-		relayHostInfo, err := hm.QueryVpnIp(relayIp)
-		if err != nil {
-			hm.l.WithError(err).WithField("relay", relayIp).Info("Missing relay host in hostmap")
-		} else {
-			if r, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp); ok {
-				teardownRelayIdx = append(teardownRelayIdx, r.LocalIndex)
-			}
-		}
-	}
-	for _, localIdx := range teardownRelayIdx {
-		hm.RemoveRelay(localIdx)
-	}
+	return final
 }
 
-func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
+func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
 	hm.Lock()
 	defer hm.Unlock()
-	delete(hm.RemoteIndexes, localIdx)
+	hm.unlockedMakePrimary(hostinfo)
+}
+
+func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
+	oldHostinfo := hm.Hosts[hostinfo.vpnIp]
+	if oldHostinfo == hostinfo {
+		return
+	}
+
+	if hostinfo.prev != nil {
+		hostinfo.prev.next = hostinfo.next
+	}
+
+	if hostinfo.next != nil {
+		hostinfo.next.prev = hostinfo.prev
+	}
+
+	hm.Hosts[hostinfo.vpnIp] = hostinfo
+
+	if oldHostinfo == nil {
+		return
+	}
+
+	hostinfo.next = oldHostinfo
+	oldHostinfo.prev = hostinfo
+	hostinfo.prev = nil
 }

 func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
-	// Check if this same hostId is in the hostmap with a different instance.
-	// This could happen if we have an entry in the pending hostmap with different
-	// index values than the one in the main hostmap.
-	hostinfo2, ok := hm.Hosts[hostinfo.vpnIp]
-	if ok && hostinfo2 != hostinfo {
-		delete(hm.Hosts, hostinfo2.vpnIp)
-		delete(hm.Indexes, hostinfo2.localIndexId)
-		delete(hm.RemoteIndexes, hostinfo2.remoteIndexId)
-	}
-
-	delete(hm.Hosts, hostinfo.vpnIp)
-	if len(hm.Hosts) == 0 {
-		hm.Hosts = map[iputil.VpnIp]*HostInfo{}
+	primary, ok := hm.Hosts[hostinfo.vpnIp]
+	if ok && primary == hostinfo {
+		// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
+		delete(hm.Hosts, hostinfo.vpnIp)
+		if len(hm.Hosts) == 0 {
+			hm.Hosts = map[iputil.VpnIp]*HostInfo{}
+		}
+
+		if hostinfo.next != nil {
+			// We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
+			hm.Hosts[hostinfo.vpnIp] = hostinfo.next
+			// It is primary, there is no previous hostinfo now
+			hostinfo.next.prev = nil
+		}
+
+	} else {
+		// Relink if we were in the middle of multiple hostinfos for this vpn ip
+		if hostinfo.prev != nil {
+			hostinfo.prev.next = hostinfo.next
+		}
+
+		if hostinfo.next != nil {
+			hostinfo.next.prev = hostinfo.prev
+		}
 	}
 
+	hostinfo.next = nil
+	hostinfo.prev = nil
+
+	// The remote index uses index ids outside our control so lets make sure we are only removing
+	// the remote index pointer here if it points to the hostinfo we are deleting
+	hostinfo2, ok := hm.RemoteIndexes[hostinfo.remoteIndexId]
+	if ok && hostinfo2 == hostinfo {
+		delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
+		if len(hm.RemoteIndexes) == 0 {
+			hm.RemoteIndexes = map[uint32]*HostInfo{}
+		}
+	}
+
 	delete(hm.Indexes, hostinfo.localIndexId)
 	if len(hm.Indexes) == 0 {
 		hm.Indexes = map[uint32]*HostInfo{}
 	}
-	delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
-	if len(hm.RemoteIndexes) == 0 {
-		hm.RemoteIndexes = map[uint32]*HostInfo{}
-	}
 
 	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
+		hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
 			"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
 			Debug("Hostmap hostInfo deleted")
 	}
+
+	for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
+		delete(hm.Relays, localRelayIdx)
+	}
 }
 
-func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
-	//TODO: we probably just want to return bool instead of error, or at least a static error
+func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Indexes[index]; ok {
 		hm.RUnlock()
-		return h, nil
+		return h
 	} else {
 		hm.RUnlock()
-		return nil, errors.New("unable to find index")
+		return nil
 	}
 }
-func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
-	//TODO: we probably just want to return bool instead of error, or at least a static error
+
+func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Relays[index]; ok {
 		hm.RUnlock()
-		return h, nil
+		return h
 	} else {
 		hm.RUnlock()
-		return nil, errors.New("unable to find index")
+		return nil
 	}
 }
 
-func (hm *HostMap) QueryReverseIndex(index uint32) (*HostInfo, error) {
+func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.RemoteIndexes[index]; ok {
 		hm.RUnlock()
-		return h, nil
+		return h
 	} else {
 		hm.RUnlock()
-		return nil, fmt.Errorf("unable to find reverse index or connectionstate nil in %s hostmap", hm.name)
+		return nil
 	}
 }
 
-func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) {
+func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
 	return hm.queryVpnIp(vpnIp, nil)
 }
 
-// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
-// `PromoteEvery` calls to this function for a given host.
-func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) {
-	return hm.queryVpnIp(vpnIp, ifce)
+func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*HostInfo, *Relay, error) {
+	hm.RLock()
+	defer hm.RUnlock()
+
+	h, ok := hm.Hosts[relayHostIp]
+	if !ok {
+		return nil, nil, errors.New("unable to find host")
+	}
+	for h != nil {
+		r, ok := h.relayState.QueryRelayForByIp(targetIp)
+		if ok && r.State == Established {
+			return h, r, nil
+		}
+		h = h.next
+	}
+	return nil, nil, errors.New("unable to find host with relay")
 }
 
-func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*HostInfo, error) {
+func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Hosts[vpnIp]; ok {
 		hm.RUnlock()
@@ -513,90 +459,78 @@ func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*Host
 		if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
 			h.TryPromoteBest(hm.preferredRanges, promoteIfce)
 		}
-		return h, nil
+		return h
 	}
 
 	hm.RUnlock()
-	return nil, errors.New("unable to find host")
+	return nil
 }
 
-// We already have the hm Lock when this is called, so make sure to not call
-// any other methods that might try to grab it again
-func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) {
+// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
+// If an entry exists for the Hosts table (vpnIp -> hostinfo) then the provided hostinfo will be made primary
+func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
 	if f.serveDns {
 		remoteCert := hostinfo.ConnectionState.peerCert
 		dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
 	}
 
+	existing := hm.Hosts[hostinfo.vpnIp]
 	hm.Hosts[hostinfo.vpnIp] = hostinfo
+
+	if existing != nil {
+		hostinfo.next = existing
+		existing.prev = hostinfo
+	}
+
 	hm.Indexes[hostinfo.localIndexId] = hostinfo
 	hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
 
 	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
+		hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
 			"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
 			Debug("Hostmap vpnIp added")
 	}
+
+	i := 1
+	check := hostinfo
+	for check != nil {
+		if i > MaxHostInfosPerVpnIp {
+			hm.unlockedDeleteHostInfo(check)
+		}
+		check = check.next
+		i++
+	}
 }
 
-// punchList assembles a list of all non nil RemoteList pointer entries in this hostmap
-// The caller can then do the its work outside of the read lock
-func (hm *HostMap) punchList(rl []*RemoteList) []*RemoteList {
+func (hm *HostMap) GetPreferredRanges() []*net.IPNet {
+	return hm.preferredRanges
+}
+
+func (hm *HostMap) ForEachVpnIp(f controlEach) {
 	hm.RLock()
 	defer hm.RUnlock()
 
 	for _, v := range hm.Hosts {
-		if v.remotes != nil {
-			rl = append(rl, v.remotes)
-		}
+		f(v)
 	}
-	return rl
 }
 
-// Punchy iterates through the result of punchList() to assemble all known addresses and sends a hole punch packet to them
-func (hm *HostMap) Punchy(ctx context.Context, conn *udp.Conn) {
-	var metricsTxPunchy metrics.Counter
-	if hm.metricsEnabled {
-		metricsTxPunchy = metrics.GetOrRegisterCounter("messages.tx.punchy", nil)
-	} else {
-		metricsTxPunchy = metrics.NilCounter{}
-	}
-
-	var remotes []*RemoteList
-	b := []byte{1}
-
-	clockSource := time.NewTicker(time.Second * 10)
-	defer clockSource.Stop()
-
-	for {
-		remotes = hm.punchList(remotes[:0])
-		for _, rl := range remotes {
-			//TODO: CopyAddrs generates garbage but ForEach locks for the work here, figure out which way is better
-			for _, addr := range rl.CopyAddrs(hm.preferredRanges) {
-				metricsTxPunchy.Inc(1)
-				conn.WriteTo(b, addr)
-			}
-		}
-
-		select {
-		case <-ctx.Done():
-			return
-		case <-clockSource.C:
-			continue
-		}
-	}
+func (hm *HostMap) ForEachIndex(f controlEach) {
+	hm.RLock()
+	defer hm.RUnlock()
+
+	for _, v := range hm.Indexes {
+		f(v)
+	}
 }
 
 // TryPromoteBest handles re-querying lighthouses and probing for better paths
 // NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
 func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
-	c := atomic.AddUint32(&i.promoteCounter, 1)
-	if c%PromoteEvery == 0 {
-		// The lock here is currently protecting i.remote access
-		i.RLock()
+	c := i.promoteCounter.Add(1)
+	if c%ifce.tryPromoteEvery.Load() == 0 {
 		remote := i.remote
-		i.RUnlock()
 
 		// return early if we are already on a preferred remote
 		if remote != nil {
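unlockedAddHostInfo above links a new hostinfo in front of any existing one, and unlockedMakePrimary and unlockedDeleteHostInfo in the earlier hunks maintain the same per-vpn-ip doubly linked list, with the primary stored in hm.Hosts. A standalone sketch of the promote-to-head step on a simplified node type, not nebula code:

// node stands in for HostInfo's next/prev links; illustrative only.
type node struct {
	id         uint32
	next, prev *node
}

// promote moves n to the front of the list and returns the new head.
func promote(head, n *node) *node {
	if head == n {
		return head
	}
	// unlink n from its current position
	if n.prev != nil {
		n.prev.next = n.next
	}
	if n.next != nil {
		n.next.prev = n.prev
	}
	// relink n in front of the old head
	n.prev = nil
	n.next = head
	if head != nil {
		head.prev = n
	}
	return n
}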
@@ -620,66 +554,17 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
 	}
 
 	// Re query our lighthouses for new remotes occasionally
-	if c%ReQueryEvery == 0 && ifce.lightHouse != nil {
-		ifce.lightHouse.QueryServer(i.vpnIp, ifce)
+	if c%ifce.reQueryEvery.Load() == 0 && ifce.lightHouse != nil {
+		now := time.Now().UnixNano()
+		if now < i.nextLHQuery.Load() {
+			return
+		}
+
+		i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
+		ifce.lightHouse.QueryServer(i.vpnIp)
 	}
 }
 
-func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
-	//TODO: return the error so we can log with more context
-	if len(i.packetStore) < 100 {
-		tempPacket := make([]byte, len(packet))
-		copy(tempPacket, packet)
-		//l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket)
-		i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket})
-		if l.Level >= logrus.DebugLevel {
-			i.logger(l).
-				WithField("length", len(i.packetStore)).
-				WithField("stored", true).
-				Debugf("Packet store")
-		}
-
-	} else if l.Level >= logrus.DebugLevel {
-		m.dropped.Inc(1)
-		i.logger(l).
-			WithField("length", len(i.packetStore)).
-			WithField("stored", false).
-			Debugf("Packet store")
-	}
-}
-
-// handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets
-func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
-	//TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because:
-	//TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send
-	//TODO: if the transition from HandhsakeComplete to ConnectionState.ready happens all within this function they are identical
-
-	i.ConnectionState.queueLock.Lock()
-	i.HandshakeComplete = true
-	//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
-	// Clamping it to 2 gets us out of the woods for now
-	atomic.StoreUint64(&i.ConnectionState.atomicMessageCounter, 2)
-
-	if l.Level >= logrus.DebugLevel {
-		i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
-	}
-
-	if len(i.packetStore) > 0 {
-		nb := make([]byte, 12, 12)
-		out := make([]byte, mtu)
-		for _, cp := range i.packetStore {
-			cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out)
-		}
-		m.sent.Inc(int64(len(i.packetStore)))
-	}
-
-	i.remotes.ResetBlockedRemotes()
-	i.packetStore = make([]*cachedPacket, 0)
-	i.ConnectionState.ready = true
-	i.ConnectionState.queueLock.Unlock()
-	i.ConnectionState.certState = nil
-}
-
 func (i *HostInfo) GetCert() *cert.NebulaCertificate {
 	if i.ConnectionState != nil {
 		return i.ConnectionState.peerCert
@@ -736,9 +621,8 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 }
 
 func (i *HostInfo) RecvErrorExceeded() bool {
-	if i.recvError < 3 {
-		i.recvError += 1
-		return false
+	if i.recvError.Add(1) >= maxRecvError {
+		return true
 	}
 	return true
 }
@@ -749,7 +633,7 @@ func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
 		return
 	}
 
-	remoteCidr := cidr.NewTree4()
+	remoteCidr := cidr.NewTree4[struct{}]()
 	for _, ip := range c.Details.Ips {
 		remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
 	}
@@ -765,7 +649,10 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
 		return logrus.NewEntry(l)
 	}
 
-	li := l.WithField("vpnIp", i.vpnIp)
+	li := l.WithField("vpnIp", i.vpnIp).
+		WithField("localIndex", i.localIndexId).
+		WithField("remoteIndex", i.remoteIndexId)
 
 	if connState := i.ConnectionState; connState != nil {
 		if peerCert := connState.peerCert; peerCert != nil {
 			li = li.WithField("certName", peerCert.Details.Name)
|||||||
206  hostmap_test.go
@@ -1 +1,207 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/test"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHostMap_MakePrimary(t *testing.T) {
|
||||||
|
l := test.NewLogger()
|
||||||
|
hm := NewHostMap(
|
||||||
|
l,
|
||||||
|
&net.IPNet{
|
||||||
|
IP: net.IP{10, 0, 0, 1},
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
},
|
||||||
|
[]*net.IPNet{},
|
||||||
|
)
|
||||||
|
|
||||||
|
f := &Interface{}
|
||||||
|
|
||||||
|
h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
|
||||||
|
h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
|
||||||
|
h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
|
||||||
|
h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
|
||||||
|
|
||||||
|
hm.unlockedAddHostInfo(h4, f)
|
||||||
|
hm.unlockedAddHostInfo(h3, f)
|
||||||
|
hm.unlockedAddHostInfo(h2, f)
|
||||||
|
hm.unlockedAddHostInfo(h1, f)
|
||||||
|
|
||||||
|
// Make sure we go h1 -> h2 -> h3 -> h4
|
||||||
|
prim := hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h1.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Swap h3/middle to primary
|
||||||
|
hm.MakePrimary(h3)
|
||||||
|
|
||||||
|
// Make sure we go h3 -> h1 -> h2 -> h4
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Swap h4/tail to primary
|
||||||
|
hm.MakePrimary(h4)
|
||||||
|
|
||||||
|
// Make sure we go h4 -> h3 -> h1 -> h2
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Nil(t, h2.next)
|
||||||
|
|
||||||
|
// Swap h4 again should be no-op
|
||||||
|
hm.MakePrimary(h4)
|
||||||
|
|
||||||
|
// Make sure we go h4 -> h3 -> h1 -> h2
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Nil(t, h2.next)
|
||||||
|
}
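The assertions above exercise what is effectively a doubly linked list of HostInfo entries per vpnIp, with the head acting as the primary. A reduced sketch of the move-to-head operation the test verifies; node and makePrimary here are hypothetical names, not the hostmap API:

package main

import "fmt"

type node struct {
	id         int
	prev, next *node
}

// makePrimary moves n to the head of the list and returns the new head.
func makePrimary(head, n *node) *node {
	if head == n {
		return head // already primary; no-op
	}
	// Unlink n from its current position.
	if n.prev != nil {
		n.prev.next = n.next
	}
	if n.next != nil {
		n.next.prev = n.prev
	}
	// Reattach n in front of the old head.
	n.prev = nil
	n.next = head
	head.prev = n
	return n
}

func main() {
	a, b, c := &node{id: 1}, &node{id: 2}, &node{id: 3}
	a.next, b.prev, b.next, c.prev = b, a, c, b
	head := makePrimary(a, c)
	for p := head; p != nil; p = p.next {
		fmt.Print(p.id, " ") // 3 1 2
	}
	fmt.Println()
}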
|
||||||
|
|
||||||
|
func TestHostMap_DeleteHostInfo(t *testing.T) {
|
||||||
|
l := test.NewLogger()
|
||||||
|
hm := NewHostMap(
|
||||||
|
l,
|
||||||
|
&net.IPNet{
|
||||||
|
IP: net.IP{10, 0, 0, 1},
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
},
|
||||||
|
[]*net.IPNet{},
|
||||||
|
)
|
||||||
|
|
||||||
|
f := &Interface{}
|
||||||
|
|
||||||
|
h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
|
||||||
|
h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
|
||||||
|
h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
|
||||||
|
h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
|
||||||
|
h5 := &HostInfo{vpnIp: 1, localIndexId: 5}
|
||||||
|
h6 := &HostInfo{vpnIp: 1, localIndexId: 6}
|
||||||
|
|
||||||
|
hm.unlockedAddHostInfo(h6, f)
|
||||||
|
hm.unlockedAddHostInfo(h5, f)
|
||||||
|
hm.unlockedAddHostInfo(h4, f)
|
||||||
|
hm.unlockedAddHostInfo(h3, f)
|
||||||
|
hm.unlockedAddHostInfo(h2, f)
|
||||||
|
hm.unlockedAddHostInfo(h1, f)
|
||||||
|
|
||||||
|
// h6 should be deleted
|
||||||
|
assert.Nil(t, h6.next)
|
||||||
|
assert.Nil(t, h6.prev)
|
||||||
|
h := hm.QueryIndex(h6.localIndexId)
|
||||||
|
assert.Nil(t, h)
|
||||||
|
|
||||||
|
// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
|
||||||
|
prim := hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h1.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Delete primary
|
||||||
|
hm.DeleteHostInfo(h1)
|
||||||
|
assert.Nil(t, h1.prev)
|
||||||
|
assert.Nil(t, h1.next)
|
||||||
|
|
||||||
|
// Make sure we go h2 -> h3 -> h4 -> h5
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Delete in the middle
|
||||||
|
hm.DeleteHostInfo(h3)
|
||||||
|
assert.Nil(t, h3.prev)
|
||||||
|
assert.Nil(t, h3.next)
|
||||||
|
|
||||||
|
// Make sure we go h2 -> h4 -> h5
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Delete the tail
|
||||||
|
hm.DeleteHostInfo(h5)
|
||||||
|
assert.Nil(t, h5.prev)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Make sure we go h2 -> h4
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Delete the head
|
||||||
|
hm.DeleteHostInfo(h2)
|
||||||
|
assert.Nil(t, h2.prev)
|
||||||
|
assert.Nil(t, h2.next)
|
||||||
|
|
||||||
|
// Make sure we only have h4
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Nil(t, prim.next)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Delete the only item
|
||||||
|
hm.DeleteHostInfo(h4)
|
||||||
|
assert.Nil(t, h4.prev)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Make sure we have nil
|
||||||
|
prim = hm.QueryVpnIp(1)
|
||||||
|
assert.Nil(t, prim)
|
||||||
|
}
|
||||||
|
|||||||
24  hostmap_tester.go (new file)
@@ -0,0 +1,24 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
// This file contains functions used to export information to the e2e testing framework
|
||||||
|
|
||||||
|
import "github.com/slackhq/nebula/iputil"
|
||||||
|
|
||||||
|
func (i *HostInfo) GetVpnIp() iputil.VpnIp {
|
||||||
|
return i.vpnIp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) GetLocalIndex() uint32 {
|
||||||
|
return i.localIndexId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) GetRemoteIndex() uint32 {
|
||||||
|
return i.remoteIndexId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) GetRelayState() *RelayState {
|
||||||
|
return &i.relayState
|
||||||
|
}
|
||||||
238  inside.go
@@ -1,20 +1,20 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/firewall"
|
"github.com/slackhq/nebula/firewall"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/noiseutil"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
|
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
err := newPacket(packet, false, fwPacket)
|
err := newPacket(packet, false, fwPacket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -25,8 +25,9 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
|
|
||||||
if fwPacket.RemoteIP == f.myVpnIp {
|
if fwPacket.RemoteIP == f.myVpnIp {
|
||||||
// Immediately forward packets from self to self.
|
// Immediately forward packets from self to self.
|
||||||
// This should only happen on Darwin-based hosts, which routes packets from
|
// This should only happen on Darwin-based and FreeBSD hosts, which
|
||||||
// the Nebula IP to the Nebula IP through the Nebula TUN device.
|
// routes packets from the Nebula IP to the Nebula IP through the Nebula
|
||||||
|
// TUN device.
|
||||||
if immediatelyForwardToSelf {
|
if immediatelyForwardToSelf {
|
||||||
_, err := f.readers[q].Write(packet)
|
_, err := f.readers[q].Write(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -43,8 +44,12 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
|
hostinfo, ready := f.getOrHandshake(fwPacket.RemoteIP, func(hh *HandshakeHostInfo) {
|
||||||
|
hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
|
||||||
|
})
|
||||||
|
|
||||||
if hostinfo == nil {
|
if hostinfo == nil {
|
||||||
|
f.rejectInside(packet, out, q)
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("vpnIp", fwPacket.RemoteIP).
|
f.l.WithField("vpnIp", fwPacket.RemoteIP).
|
||||||
WithField("fwPacket", fwPacket).
|
WithField("fwPacket", fwPacket).
|
||||||
@@ -52,96 +57,83 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ci := hostinfo.ConnectionState
|
|
||||||
|
|
||||||
if ci.ready == false {
|
if !ready {
|
||||||
// Because we might be sending stored packets, lock here to stop new things going to
|
return
|
||||||
// the packet queue.
|
|
||||||
ci.queueLock.Lock()
|
|
||||||
if !ci.ready {
|
|
||||||
hostinfo.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
|
|
||||||
ci.queueLock.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ci.queueLock.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.caPool, localCache)
|
dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache)
|
||||||
if dropReason == nil {
|
if dropReason == nil {
|
||||||
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q)
|
f.sendNoMetrics(header.Message, 0, hostinfo.ConnectionState, hostinfo, nil, packet, nb, out, q)
|
||||||
|
|
||||||
} else if f.l.Level >= logrus.DebugLevel {
|
} else {
|
||||||
hostinfo.logger(f.l).
|
f.rejectInside(packet, out, q)
|
||||||
WithField("fwPacket", fwPacket).
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
WithField("reason", dropReason).
|
hostinfo.logger(f.l).
|
||||||
Debugln("dropping outbound packet")
|
WithField("fwPacket", fwPacket).
|
||||||
|
WithField("reason", dropReason).
|
||||||
|
Debugln("dropping outbound packet")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Interface) rejectInside(packet []byte, out []byte, q int) {
|
||||||
|
if !f.firewall.InSendReject {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = iputil.CreateRejectPacket(packet, out)
|
||||||
|
if len(out) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := f.readers[q].Write(out)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).Error("Failed to write to tun")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *HostInfo, nb, out []byte, q int) {
|
||||||
|
if !f.firewall.OutSendReject {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = iputil.CreateRejectPacket(packet, out)
|
||||||
|
if len(out) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out) > iputil.MaxRejectPacketSize {
|
||||||
|
if f.l.GetLevel() >= logrus.InfoLevel {
|
||||||
|
f.l.
|
||||||
|
WithField("packet", packet).
|
||||||
|
WithField("outPacket", out).
|
||||||
|
Info("rejectOutside: packet too big, not sending")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, out, nb, packet, q)
|
||||||
|
}
|
||||||
|
|
||||||
func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
|
func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
|
||||||
f.getOrHandshake(vpnIp)
|
f.getOrHandshake(vpnIp, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getOrHandshake returns nil if the vpnIp is not routable
|
// getOrHandshake returns nil if the vpnIp is not routable.
|
||||||
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
|
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
|
||||||
//TODO: we can find contains without converting back to bytes
|
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
|
||||||
if f.hostMap.vpnCIDR.Contains(vpnIp.ToIP()) == false {
|
if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) {
|
||||||
vpnIp = f.inside.RouteFor(vpnIp)
|
vpnIp = f.inside.RouteFor(vpnIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return nil
|
return nil, false
|
||||||
}
|
|
||||||
}
|
|
||||||
hostinfo, err := f.hostMap.PromoteBestQueryVpnIp(vpnIp, f)
|
|
||||||
|
|
||||||
//if err != nil || hostinfo.ConnectionState == nil {
|
|
||||||
if err != nil {
|
|
||||||
hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
|
|
||||||
if err != nil {
|
|
||||||
hostinfo = f.handshakeManager.AddVpnIp(vpnIp, f.initHostInfo)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ci := hostinfo.ConnectionState
|
|
||||||
|
|
||||||
if ci != nil && ci.eKey != nil && ci.ready {
|
|
||||||
return hostinfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handshake is not ready, we need to grab the lock now before we start the handshake process
|
|
||||||
hostinfo.Lock()
|
|
||||||
defer hostinfo.Unlock()
|
|
||||||
|
|
||||||
// Double check, now that we have the lock
|
|
||||||
ci = hostinfo.ConnectionState
|
|
||||||
if ci != nil && ci.eKey != nil && ci.ready {
|
|
||||||
return hostinfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we have already created the handshake packet, we don't want to call the function at all.
|
|
||||||
if !hostinfo.HandshakeReady {
|
|
||||||
ixHandshakeStage0(f, vpnIp, hostinfo)
|
|
||||||
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
|
|
||||||
//xx_handshakeStage0(f, ip, hostinfo)
|
|
||||||
|
|
||||||
// If this is a static host, we don't need to wait for the HostQueryReply
|
|
||||||
// We can trigger the handshake right now
|
|
||||||
if _, ok := f.lightHouse.GetStaticHostList()[vpnIp]; ok {
|
|
||||||
select {
|
|
||||||
case f.handshakeManager.trigger <- vpnIp:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return hostinfo
|
return f.handshakeManager.GetOrHandshake(vpnIp, cacheCallback)
|
||||||
}
|
}
|
||||||
|
|
||||||
// initHostInfo is the init function to pass to (*HandshakeManager).AddVpnIP that
|
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
|
||||||
// will create the initial Noise ConnectionState
|
|
||||||
func (f *Interface) initHostInfo(hostinfo *HostInfo) {
|
|
||||||
hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
|
||||||
fp := &firewall.Packet{}
|
fp := &firewall.Packet{}
|
||||||
err := newPacket(p, false, fp)
|
err := newPacket(p, false, fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -150,7 +142,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
|
|||||||
}
|
}
|
||||||
|
|
||||||
// check if packet is in outbound fw rules
|
// check if packet is in outbound fw rules
|
||||||
dropReason := f.firewall.Drop(p, *fp, false, hostInfo, f.caPool, nil)
|
dropReason := f.firewall.Drop(p, *fp, false, hostinfo, f.pki.GetCAPool(), nil)
|
||||||
if dropReason != nil {
|
if dropReason != nil {
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("fwPacket", fp).
|
f.l.WithField("fwPacket", fp).
|
||||||
@@ -160,12 +152,15 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.sendNoMetrics(header.Message, st, hostInfo.ConnectionState, hostInfo, nil, p, nb, out, 0)
|
f.sendNoMetrics(header.Message, st, hostinfo.ConnectionState, hostinfo, nil, p, nb, out, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
|
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
|
||||||
func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
|
func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
|
||||||
hostInfo := f.getOrHandshake(vpnIp)
|
hostInfo, ready := f.getOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
|
||||||
|
hh.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
|
||||||
|
})
|
||||||
|
|
||||||
if hostInfo == nil {
|
if hostInfo == nil {
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("vpnIp", vpnIp).
|
f.l.WithField("vpnIp", vpnIp).
|
||||||
@@ -174,24 +169,15 @@ func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSu
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hostInfo.ConnectionState.ready {
|
if !ready {
|
||||||
// Because we might be sending stored packets, lock here to stop new things going to
|
return
|
||||||
// the packet queue.
|
|
||||||
hostInfo.ConnectionState.queueLock.Lock()
|
|
||||||
if !hostInfo.ConnectionState.ready {
|
|
||||||
hostInfo.cachePacket(f.l, t, st, p, f.sendMessageToVpnIp, f.cachedPacketMetrics)
|
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
f.sendMessageToVpnIp(t, st, hostInfo, p, nb, out)
|
f.SendMessageToHostInfo(t, st, hostInfo, p, nb, out)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendMessageToVpnIp(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hi *HostInfo, p, nb, out []byte) {
|
||||||
f.send(t, st, hostInfo.ConnectionState, hostInfo, p, nb, out)
|
f.send(t, st, hi.ConnectionState, hi, p, nb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) {
|
||||||
@@ -204,7 +190,7 @@ func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *C
|
|||||||
f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
|
f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendVia sends a payload through a Relay tunnel. No authentication or encryption is done
|
// SendVia sends a payload through a Relay tunnel. No authentication or encryption is done
|
||||||
// to the payload for the ultimate target host, making this a useful method for sending
|
// to the payload for the ultimate target host, making this a useful method for sending
|
||||||
// handshake messages to peers through relay tunnels.
|
// handshake messages to peers through relay tunnels.
|
||||||
// via is the HostInfo through which the message is relayed.
|
// via is the HostInfo through which the message is relayed.
|
||||||
@@ -212,23 +198,28 @@ func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *C
|
|||||||
// nb is a buffer used to store the nonce value, re-used for performance reasons.
|
// nb is a buffer used to store the nonce value, re-used for performance reasons.
|
||||||
// out is a buffer used to store the result of the Encrypt operation
|
// out is a buffer used to store the result of the Encrypt operation
|
||||||
// q indicates which writer to use to send the packet.
|
// q indicates which writer to use to send the packet.
|
||||||
func (f *Interface) SendVia(viaIfc interface{},
|
func (f *Interface) SendVia(via *HostInfo,
|
||||||
relayIfc interface{},
|
relay *Relay,
|
||||||
ad,
|
ad,
|
||||||
nb,
|
nb,
|
||||||
out []byte,
|
out []byte,
|
||||||
nocopy bool,
|
nocopy bool,
|
||||||
) {
|
) {
|
||||||
via := viaIfc.(*HostInfo)
|
if noiseutil.EncryptLockNeeded {
|
||||||
relay := relayIfc.(*Relay)
|
// NOTE: for goboring AESGCMTLS we need to lock because of the nonce check
|
||||||
c := atomic.AddUint64(&via.ConnectionState.atomicMessageCounter, 1)
|
via.ConnectionState.writeLock.Lock()
|
||||||
|
}
|
||||||
|
c := via.ConnectionState.messageCounter.Add(1)
|
||||||
|
|
||||||
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
|
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
|
||||||
f.connectionManager.Out(via.vpnIp)
|
f.connectionManager.Out(via.localIndexId)
|
||||||
|
|
||||||
// Authenticate the header and payload, but do not encrypt for this message type.
|
// Authenticate the header and payload, but do not encrypt for this message type.
|
||||||
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
|
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
|
||||||
if len(out)+len(ad)+via.ConnectionState.eKey.Overhead() > cap(out) {
|
if len(out)+len(ad)+via.ConnectionState.eKey.Overhead() > cap(out) {
|
||||||
|
if noiseutil.EncryptLockNeeded {
|
||||||
|
via.ConnectionState.writeLock.Unlock()
|
||||||
|
}
|
||||||
via.logger(f.l).
|
via.logger(f.l).
|
||||||
WithField("outCap", cap(out)).
|
WithField("outCap", cap(out)).
|
||||||
WithField("payloadLen", len(ad)).
|
WithField("payloadLen", len(ad)).
|
||||||
@@ -250,6 +241,9 @@ func (f *Interface) SendVia(viaIfc interface{},
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
out, err = via.ConnectionState.eKey.EncryptDanger(out, out, nil, c, nb)
|
out, err = via.ConnectionState.eKey.EncryptDanger(out, out, nil, c, nb)
|
||||||
|
if noiseutil.EncryptLockNeeded {
|
||||||
|
via.ConnectionState.writeLock.Unlock()
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
via.logger(f.l).WithError(err).Info("Failed to EncryptDanger in sendVia")
|
via.logger(f.l).WithError(err).Info("Failed to EncryptDanger in sendVia")
|
||||||
return
|
return
|
||||||
@@ -258,6 +252,7 @@ func (f *Interface) SendVia(viaIfc interface{},
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
via.logger(f.l).WithError(err).Info("Failed to WriteTo in sendVia")
|
via.logger(f.l).WithError(err).Info("Failed to WriteTo in sendVia")
|
||||||
}
|
}
|
||||||
|
f.connectionManager.RelayUsed(relay.LocalIndex)
|
||||||
}
|
}
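SendVia now takes ConnectionState.writeLock only when noiseutil.EncryptLockNeeded is set, keeping the common path lock-free while still serializing nonce assignment for cipher builds that demand strictly increasing counters. A stripped-down sketch of that conditional-locking idiom, with illustrative names (encryptLockNeeded, state, seal) rather than the real API:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// encryptLockNeeded would be true only for cipher implementations that
// enforce strictly increasing nonces internally (e.g. a FIPS-style build).
const encryptLockNeeded = false

type state struct {
	writeLock      sync.Mutex
	messageCounter atomic.Uint64
}

// seal assigns a nonce and stands in for encryption; the lock is held only
// when the cipher actually requires it.
func (s *state) seal(payload []byte) uint64 {
	if encryptLockNeeded {
		s.writeLock.Lock()
		defer s.writeLock.Unlock()
	}
	c := s.messageCounter.Add(1)
	// ... real code would encrypt payload with nonce c here ...
	_ = payload
	return c
}

func main() {
	s := &state{}
	fmt.Println(s.seal([]byte("hello")), s.seal([]byte("world"))) // 1 2
}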
|
||||||
|
|
||||||
func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
|
func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
|
||||||
@@ -278,20 +273,22 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
|||||||
out = out[header.Len:]
|
out = out[header.Len:]
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: enable if we do more than 1 tun queue
|
if noiseutil.EncryptLockNeeded {
|
||||||
//ci.writeLock.Lock()
|
// NOTE: for goboring AESGCMTLS we need to lock because of the nonce check
|
||||||
c := atomic.AddUint64(&ci.atomicMessageCounter, 1)
|
ci.writeLock.Lock()
|
||||||
|
}
|
||||||
|
c := ci.messageCounter.Add(1)
|
||||||
|
|
||||||
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
||||||
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
|
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
|
||||||
f.connectionManager.Out(hostinfo.vpnIp)
|
f.connectionManager.Out(hostinfo.localIndexId)
|
||||||
|
|
||||||
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
||||||
// all our IPs and enable a faster roaming.
|
// all our IPs and enable a faster roaming.
|
||||||
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
|
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
|
||||||
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
|
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
|
||||||
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
|
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
|
||||||
f.lightHouse.QueryServer(hostinfo.vpnIp, f)
|
f.lightHouse.QueryServer(hostinfo.vpnIp)
|
||||||
hostinfo.lastRebindCount = f.rebindCount
|
hostinfo.lastRebindCount = f.rebindCount
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")
|
f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")
|
||||||
@@ -300,8 +297,9 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
|
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
|
||||||
//TODO: see above note on lock
|
if noiseutil.EncryptLockNeeded {
|
||||||
//ci.writeLock.Unlock()
|
ci.writeLock.Unlock()
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithError(err).
|
hostinfo.logger(f.l).WithError(err).
|
||||||
WithField("udpAddr", remote).WithField("counter", c).
|
WithField("udpAddr", remote).WithField("counter", c).
|
||||||
@@ -325,31 +323,19 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
|||||||
} else {
|
} else {
|
||||||
// Try to send via a relay
|
// Try to send via a relay
|
||||||
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
|
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
|
||||||
relayHostInfo, err := f.hostMap.QueryVpnIp(relayIP)
|
relayHostInfo, relay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relayIP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithField("relayIp", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
|
hostinfo.relayState.DeleteRelay(relayIP)
|
||||||
continue
|
hostinfo.logger(f.l).WithField("relay", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
|
||||||
}
|
|
||||||
relay, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp)
|
|
||||||
if !ok {
|
|
||||||
hostinfo.logger(f.l).
|
|
||||||
WithField("relayIp", relayHostInfo.vpnIp).
|
|
||||||
WithField("relayTarget", hostinfo.vpnIp).
|
|
||||||
Info("sendNoMetrics relay missing object for target")
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
|
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
 func isMulticast(ip iputil.VpnIp) bool {
 	// Class D multicast
-	if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 {
-		return true
-	}
-
-	return false
+	return (((ip >> 24) & 0xff) & 0xf0) == 0xe0
 }
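For reference, the class D test above keeps exactly the addresses whose leading four bits are 1110, i.e. 224.0.0.0/4. A quick standalone check of that bit test, assuming a VpnIp-like uint32 in host order:

package main

import "fmt"

func isMulticast(ip uint32) bool {
	// Class D multicast: the top nibble of the first octet is 0b1110.
	return ((ip >> 24) & 0xf0) == 0xe0
}

func main() {
	fmt.Println(isMulticast(224<<24 | 1))   // true  (224.0.0.1)
	fmt.Println(isMulticast(239<<24 | 255)) // true  (239.0.0.255)
	fmt.Println(isMulticast(10<<24 | 1))    // false (10.0.0.1)
}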
|
||||||
|
|||||||
6  inside_bsd.go (new file)
@@ -0,0 +1,6 @@
|
|||||||
|
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
|
||||||
|
// +build darwin dragonfly freebsd netbsd openbsd
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
const immediatelyForwardToSelf bool = true
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
package nebula
|
|
||||||
|
|
||||||
const immediatelyForwardToSelf bool = true
|
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
//go:build !darwin
|
//go:build !darwin && !dragonfly && !freebsd && !netbsd && !openbsd
|
||||||
// +build !darwin
|
// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd
|
||||||
|
|
||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
|
|||||||
155  interface.go
@@ -13,9 +13,9 @@ import (
|
|||||||
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/firewall"
|
"github.com/slackhq/nebula/firewall"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/overlay"
|
"github.com/slackhq/nebula/overlay"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
@@ -25,24 +25,27 @@ const mtu = 9001
|
|||||||
|
|
||||||
type InterfaceConfig struct {
|
type InterfaceConfig struct {
|
||||||
HostMap *HostMap
|
HostMap *HostMap
|
||||||
Outside *udp.Conn
|
Outside udp.Conn
|
||||||
Inside overlay.Device
|
Inside overlay.Device
|
||||||
certState *CertState
|
pki *PKI
|
||||||
Cipher string
|
Cipher string
|
||||||
Firewall *Firewall
|
Firewall *Firewall
|
||||||
ServeDns bool
|
ServeDns bool
|
||||||
HandshakeManager *HandshakeManager
|
HandshakeManager *HandshakeManager
|
||||||
lightHouse *LightHouse
|
lightHouse *LightHouse
|
||||||
checkInterval int
|
checkInterval time.Duration
|
||||||
pendingDeletionInterval int
|
pendingDeletionInterval time.Duration
|
||||||
DropLocalBroadcast bool
|
DropLocalBroadcast bool
|
||||||
DropMulticast bool
|
DropMulticast bool
|
||||||
routines int
|
routines int
|
||||||
MessageMetrics *MessageMetrics
|
MessageMetrics *MessageMetrics
|
||||||
version string
|
version string
|
||||||
caPool *cert.NebulaCAPool
|
|
||||||
disconnectInvalid bool
|
|
||||||
relayManager *relayManager
|
relayManager *relayManager
|
||||||
|
punchy *Punchy
|
||||||
|
|
||||||
|
tryPromoteEvery uint32
|
||||||
|
reQueryEvery uint32
|
||||||
|
reQueryWait time.Duration
|
||||||
|
|
||||||
ConntrackCacheTimeout time.Duration
|
ConntrackCacheTimeout time.Duration
|
||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
@@ -50,9 +53,9 @@ type InterfaceConfig struct {
|
|||||||
|
|
||||||
type Interface struct {
|
type Interface struct {
|
||||||
hostMap *HostMap
|
hostMap *HostMap
|
||||||
outside *udp.Conn
|
outside udp.Conn
|
||||||
inside overlay.Device
|
inside overlay.Device
|
||||||
certState *CertState
|
pki *PKI
|
||||||
cipher string
|
cipher string
|
||||||
firewall *Firewall
|
firewall *Firewall
|
||||||
connectionManager *connectionManager
|
connectionManager *connectionManager
|
||||||
@@ -65,11 +68,14 @@ type Interface struct {
|
|||||||
dropLocalBroadcast bool
|
dropLocalBroadcast bool
|
||||||
dropMulticast bool
|
dropMulticast bool
|
||||||
routines int
|
routines int
|
||||||
caPool *cert.NebulaCAPool
|
disconnectInvalid atomic.Bool
|
||||||
disconnectInvalid bool
|
closed atomic.Bool
|
||||||
closed int32
|
|
||||||
relayManager *relayManager
|
relayManager *relayManager
|
||||||
|
|
||||||
|
tryPromoteEvery atomic.Uint32
|
||||||
|
reQueryEvery atomic.Uint32
|
||||||
|
reQueryWait atomic.Int64
|
||||||
|
|
||||||
sendRecvErrorConfig sendRecvErrorConfig
|
sendRecvErrorConfig sendRecvErrorConfig
|
||||||
|
|
||||||
// rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse
|
// rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse
|
||||||
@@ -78,7 +84,7 @@ type Interface struct {
|
|||||||
|
|
||||||
conntrackCacheTimeout time.Duration
|
conntrackCacheTimeout time.Duration
|
||||||
|
|
||||||
writers []*udp.Conn
|
writers []udp.Conn
|
||||||
readers []io.ReadWriteCloser
|
readers []io.ReadWriteCloser
|
||||||
|
|
||||||
metricHandshakes metrics.Histogram
|
metricHandshakes metrics.Histogram
|
||||||
@@ -88,6 +94,19 @@ type Interface struct {
|
|||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EncWriter interface {
|
||||||
|
SendVia(via *HostInfo,
|
||||||
|
relay *Relay,
|
||||||
|
ad,
|
||||||
|
nb,
|
||||||
|
out []byte,
|
||||||
|
nocopy bool,
|
||||||
|
)
|
||||||
|
SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte)
|
||||||
|
SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte)
|
||||||
|
Handshake(vpnIp iputil.VpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
type sendRecvErrorConfig uint8
|
type sendRecvErrorConfig uint8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -129,34 +148,33 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
|||||||
if c.Inside == nil {
|
if c.Inside == nil {
|
||||||
return nil, errors.New("no inside interface (tun)")
|
return nil, errors.New("no inside interface (tun)")
|
||||||
}
|
}
|
||||||
if c.certState == nil {
|
if c.pki == nil {
|
||||||
return nil, errors.New("no certificate state")
|
return nil, errors.New("no certificate state")
|
||||||
}
|
}
|
||||||
if c.Firewall == nil {
|
if c.Firewall == nil {
|
||||||
return nil, errors.New("no firewall rules")
|
return nil, errors.New("no firewall rules")
|
||||||
}
|
}
|
||||||
|
|
||||||
myVpnIp := iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].IP)
|
certificate := c.pki.GetCertState().Certificate
|
||||||
|
myVpnIp := iputil.Ip2VpnIp(certificate.Details.Ips[0].IP)
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
|
pki: c.pki,
|
||||||
hostMap: c.HostMap,
|
hostMap: c.HostMap,
|
||||||
outside: c.Outside,
|
outside: c.Outside,
|
||||||
inside: c.Inside,
|
inside: c.Inside,
|
||||||
certState: c.certState,
|
|
||||||
cipher: c.Cipher,
|
cipher: c.Cipher,
|
||||||
firewall: c.Firewall,
|
firewall: c.Firewall,
|
||||||
serveDns: c.ServeDns,
|
serveDns: c.ServeDns,
|
||||||
handshakeManager: c.HandshakeManager,
|
handshakeManager: c.HandshakeManager,
|
||||||
createTime: time.Now(),
|
createTime: time.Now(),
|
||||||
lightHouse: c.lightHouse,
|
lightHouse: c.lightHouse,
|
||||||
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].Mask),
|
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(certificate.Details.Ips[0].Mask),
|
||||||
dropLocalBroadcast: c.DropLocalBroadcast,
|
dropLocalBroadcast: c.DropLocalBroadcast,
|
||||||
dropMulticast: c.DropMulticast,
|
dropMulticast: c.DropMulticast,
|
||||||
routines: c.routines,
|
routines: c.routines,
|
||||||
version: c.version,
|
version: c.version,
|
||||||
writers: make([]*udp.Conn, c.routines),
|
writers: make([]udp.Conn, c.routines),
|
||||||
readers: make([]io.ReadWriteCloser, c.routines),
|
readers: make([]io.ReadWriteCloser, c.routines),
|
||||||
caPool: c.caPool,
|
|
||||||
disconnectInvalid: c.disconnectInvalid,
|
|
||||||
myVpnIp: myVpnIp,
|
myVpnIp: myVpnIp,
|
||||||
relayManager: c.relayManager,
|
relayManager: c.relayManager,
|
||||||
|
|
||||||
@@ -172,7 +190,11 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
|||||||
l: c.l,
|
l: c.l,
|
||||||
}
|
}
|
||||||
|
|
||||||
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval)
|
ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
|
||||||
|
ifce.reQueryEvery.Store(c.reQueryEvery)
|
||||||
|
ifce.reQueryWait.Store(int64(c.reQueryWait))
|
||||||
|
|
||||||
|
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
|
||||||
|
|
||||||
return ifce, nil
|
return ifce, nil
|
||||||
}
|
}
|
||||||
@@ -190,6 +212,7 @@ func (f *Interface) activate() {
|
|||||||
|
|
||||||
f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()).
|
f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()).
|
||||||
WithField("build", f.version).WithField("udpAddr", addr).
|
WithField("build", f.version).WithField("udpAddr", addr).
|
||||||
|
WithField("boringcrypto", boringEnabled()).
|
||||||
Info("Nebula interface is active")
|
Info("Nebula interface is active")
|
||||||
|
|
||||||
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
|
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
|
||||||
@@ -227,7 +250,7 @@ func (f *Interface) run() {
|
|||||||
func (f *Interface) listenOut(i int) {
|
func (f *Interface) listenOut(i int) {
|
||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
|
|
||||||
var li *udp.Conn
|
var li udp.Conn
|
||||||
// TODO clean this up with a coherent interface for each outside connection
|
// TODO clean this up with a coherent interface for each outside connection
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
li = f.writers[i]
|
li = f.writers[i]
|
||||||
@@ -237,7 +260,7 @@ func (f *Interface) listenOut(i int) {
|
|||||||
|
|
||||||
lhh := f.lightHouse.NewRequestHandler()
|
lhh := f.lightHouse.NewRequestHandler()
|
||||||
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||||
li.ListenOut(f.readOutsidePackets, lhh.HandleRequest, conntrackCache, i)
|
li.ListenOut(readOutsidePackets(f), lhHandleRequest(lhh, f), conntrackCache, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
||||||
@@ -253,7 +276,7 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
|||||||
for {
|
for {
|
||||||
n, err := reader.Read(packet)
|
n, err := reader.Read(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&f.closed) != 0 {
|
if errors.Is(err, os.ErrClosed) && f.closed.Load() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -267,46 +290,24 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
|
func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
|
||||||
c.RegisterReloadCallback(f.reloadCA)
|
|
||||||
c.RegisterReloadCallback(f.reloadCertKey)
|
|
||||||
c.RegisterReloadCallback(f.reloadFirewall)
|
c.RegisterReloadCallback(f.reloadFirewall)
|
||||||
c.RegisterReloadCallback(f.reloadSendRecvError)
|
c.RegisterReloadCallback(f.reloadSendRecvError)
|
||||||
|
c.RegisterReloadCallback(f.reloadDisconnectInvalid)
|
||||||
|
c.RegisterReloadCallback(f.reloadMisc)
|
||||||
|
|
||||||
for _, udpConn := range f.writers {
|
for _, udpConn := range f.writers {
|
||||||
c.RegisterReloadCallback(udpConn.ReloadConfig)
|
c.RegisterReloadCallback(udpConn.ReloadConfig)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) reloadCA(c *config.C) {
|
func (f *Interface) reloadDisconnectInvalid(c *config.C) {
|
||||||
// reload and check regardless
|
initial := c.InitialLoad()
|
||||||
// todo: need mutex?
|
if initial || c.HasChanged("pki.disconnect_invalid") {
|
||||||
newCAs, err := loadCAFromConfig(f.l, c)
|
f.disconnectInvalid.Store(c.GetBool("pki.disconnect_invalid", true))
|
||||||
if err != nil {
|
if !initial {
|
||||||
f.l.WithError(err).Error("Could not refresh trusted CA certificates")
|
f.l.Infof("pki.disconnect_invalid changed to %v", f.disconnectInvalid.Load())
|
||||||
return
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
f.caPool = newCAs
|
|
||||||
f.l.WithField("fingerprints", f.caPool.GetFingerprints()).Info("Trusted CA certificates refreshed")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Interface) reloadCertKey(c *config.C) {
|
|
||||||
// reload and check in all cases
|
|
||||||
cs, err := NewCertStateFromConfig(c)
|
|
||||||
if err != nil {
|
|
||||||
f.l.WithError(err).Error("Could not refresh client cert")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// did IP in cert change? if so, don't set
|
|
||||||
oldIPs := f.certState.certificate.Details.Ips
|
|
||||||
newIPs := cs.certificate.Details.Ips
|
|
||||||
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
|
|
||||||
f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
f.certState = cs
|
|
||||||
f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) reloadFirewall(c *config.C) {
|
func (f *Interface) reloadFirewall(c *config.C) {
|
||||||
@@ -316,7 +317,7 @@ func (f *Interface) reloadFirewall(c *config.C) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
fw, err := NewFirewallFromConfig(f.l, f.certState.certificate, c)
|
fw, err := NewFirewallFromConfig(f.l, f.pki.GetCertState().Certificate, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithError(err).Error("Error while creating firewall during reload")
|
f.l.WithError(err).Error("Error while creating firewall during reload")
|
||||||
return
|
return
|
||||||
@@ -331,8 +332,8 @@ func (f *Interface) reloadFirewall(c *config.C) {
|
|||||||
// If rulesVersion is back to zero, we have wrapped all the way around. Be
|
// If rulesVersion is back to zero, we have wrapped all the way around. Be
|
||||||
// safe and just reset conntrack in this case.
|
// safe and just reset conntrack in this case.
|
||||||
if fw.rulesVersion == 0 {
|
if fw.rulesVersion == 0 {
|
||||||
f.l.WithField("firewallHash", fw.GetRuleHash()).
|
f.l.WithField("firewallHashes", fw.GetRuleHashes()).
|
||||||
WithField("oldFirewallHash", oldFw.GetRuleHash()).
|
WithField("oldFirewallHashes", oldFw.GetRuleHashes()).
|
||||||
WithField("rulesVersion", fw.rulesVersion).
|
WithField("rulesVersion", fw.rulesVersion).
|
||||||
Warn("firewall rulesVersion has overflowed, resetting conntrack")
|
Warn("firewall rulesVersion has overflowed, resetting conntrack")
|
||||||
} else {
|
} else {
|
||||||
@@ -342,8 +343,8 @@ func (f *Interface) reloadFirewall(c *config.C) {
|
|||||||
f.firewall = fw
|
f.firewall = fw
|
||||||
|
|
||||||
oldFw.Destroy()
|
oldFw.Destroy()
|
||||||
f.l.WithField("firewallHash", fw.GetRuleHash()).
|
f.l.WithField("firewallHashes", fw.GetRuleHashes()).
|
||||||
WithField("oldFirewallHash", oldFw.GetRuleHash()).
|
WithField("oldFirewallHashes", oldFw.GetRuleHashes()).
|
||||||
WithField("rulesVersion", fw.rulesVersion).
|
WithField("rulesVersion", fw.rulesVersion).
|
||||||
Info("New firewall has been installed")
|
Info("New firewall has been installed")
|
||||||
}
|
}
|
||||||
@@ -372,12 +373,34 @@ func (f *Interface) reloadSendRecvError(c *config.C) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Interface) reloadMisc(c *config.C) {
|
||||||
|
if c.HasChanged("counters.try_promote") {
|
||||||
|
n := c.GetUint32("counters.try_promote", defaultPromoteEvery)
|
||||||
|
f.tryPromoteEvery.Store(n)
|
||||||
|
f.l.Info("counters.try_promote has changed")
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.HasChanged("counters.requery_every_packets") {
|
||||||
|
n := c.GetUint32("counters.requery_every_packets", defaultReQueryEvery)
|
||||||
|
f.reQueryEvery.Store(n)
|
||||||
|
f.l.Info("counters.requery_every_packets has changed")
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.HasChanged("timers.requery_wait_duration") {
|
||||||
|
n := c.GetDuration("timers.requery_wait_duration", defaultReQueryWait)
|
||||||
|
f.reQueryWait.Store(int64(n))
|
||||||
|
f.l.Info("timers.requery_wait_duration has changed")
|
||||||
|
}
|
||||||
|
}
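reloadMisc follows the same reload convention as the other callbacks: check HasChanged per key, then publish the new value through an atomic so the packet path never takes a lock. A reduced sketch of that shape; the conf type, key name, and default below are illustrative, not the real config package:

package main

import (
	"fmt"
	"sync/atomic"
)

type conf map[string]uint32

func (c conf) HasChanged(k string) bool { _, ok := c[k]; return ok }

func (c conf) GetUint32(k string, d uint32) uint32 {
	if v, ok := c[k]; ok {
		return v
	}
	return d
}

type iface struct{ tryPromoteEvery atomic.Uint32 }

// reloadCounters publishes changed counters without locking the hot path.
func (f *iface) reloadCounters(c conf) {
	if c.HasChanged("counters.try_promote") {
		f.tryPromoteEvery.Store(c.GetUint32("counters.try_promote", 1000))
	}
}

func main() {
	f := &iface{}
	f.reloadCounters(conf{"counters.try_promote": 500})
	fmt.Println(f.tryPromoteEvery.Load()) // 500
}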
|
||||||
|
|
||||||
func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
||||||
ticker := time.NewTicker(i)
|
ticker := time.NewTicker(i)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
udpStats := udp.NewUDPStatsEmitter(f.writers)
|
udpStats := udp.NewUDPStatsEmitter(f.writers)
|
||||||
|
|
||||||
|
certExpirationGauge := metrics.GetOrRegisterGauge("certificate.ttl_seconds", nil)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@@ -386,12 +409,20 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
|||||||
f.firewall.EmitStats()
|
f.firewall.EmitStats()
|
||||||
f.handshakeManager.EmitStats()
|
f.handshakeManager.EmitStats()
|
||||||
udpStats()
|
udpStats()
|
||||||
|
certExpirationGauge.Update(int64(f.pki.GetCertState().Certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) Close() error {
|
func (f *Interface) Close() error {
|
||||||
atomic.StoreInt32(&f.closed, 1)
|
f.closed.Store(true)
|
||||||
|
|
||||||
|
for _, u := range f.writers {
|
||||||
|
err := u.Close()
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).Error("Error while closing udp socket")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Release the tun device
|
// Release the tun device
|
||||||
return f.inside.Close()
|
return f.inside.Close()
|
||||||
|
|||||||
238  iputil/packet.go (new file)
@@ -0,0 +1,238 @@
|
|||||||
|
package iputil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
|
||||||
|
"golang.org/x/net/ipv4"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Need 96 bytes for the largest reject packet:
|
||||||
|
// - 20 byte ipv4 header
|
||||||
|
// - 8 byte icmpv4 header
|
||||||
|
// - 68 byte body (60 byte max orig ipv4 header + 8 byte orig icmpv4 header)
|
||||||
|
MaxRejectPacketSize = ipv4.HeaderLen + 8 + 60 + 8
|
||||||
|
)
|
||||||
|
|
||||||
|
func CreateRejectPacket(packet []byte, out []byte) []byte {
|
||||||
|
if len(packet) < ipv4.HeaderLen || int(packet[0]>>4) != ipv4.Version {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch packet[9] {
|
||||||
|
case 6: // tcp
|
||||||
|
return ipv4CreateRejectTCPPacket(packet, out)
|
||||||
|
default:
|
||||||
|
return ipv4CreateRejectICMPPacket(packet, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ipv4CreateRejectICMPPacket(packet []byte, out []byte) []byte {
|
||||||
|
ihl := int(packet[0]&0x0f) << 2
|
||||||
|
|
||||||
|
if len(packet) < ihl {
|
||||||
|
// We need at least this many bytes for this to be a valid packet
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ICMP reply includes original header and first 8 bytes of the packet
|
||||||
|
packetLen := len(packet)
|
||||||
|
if packetLen > ihl+8 {
|
||||||
|
packetLen = ihl + 8
|
||||||
|
}
|
||||||
|
|
||||||
|
outLen := ipv4.HeaderLen + 8 + packetLen
|
||||||
|
if outLen > cap(out) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out = out[:outLen]
|
||||||
|
|
||||||
|
ipHdr := out[0:ipv4.HeaderLen]
|
||||||
|
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
|
||||||
|
ipHdr[1] = 0 // DSCP, ECN
|
||||||
|
binary.BigEndian.PutUint16(ipHdr[2:], uint16(outLen)) // Total Length
|
||||||
|
|
||||||
|
ipHdr[4] = 0 // id
|
||||||
|
ipHdr[5] = 0 // .
|
||||||
|
ipHdr[6] = 0 // flags, fragment offset
|
||||||
|
ipHdr[7] = 0 // .
|
||||||
|
ipHdr[8] = 64 // TTL
|
||||||
|
ipHdr[9] = 1 // protocol (icmp)
|
||||||
|
ipHdr[10] = 0 // checksum
|
||||||
|
ipHdr[11] = 0 // .
|
||||||
|
|
||||||
|
// Swap dest / src IPs
|
||||||
|
copy(ipHdr[12:16], packet[16:20])
|
||||||
|
copy(ipHdr[16:20], packet[12:16])
|
||||||
|
|
||||||
|
// Calculate checksum
|
||||||
|
binary.BigEndian.PutUint16(ipHdr[10:], tcpipChecksum(ipHdr, 0))
|
||||||
|
|
||||||
|
// ICMP Destination Unreachable
|
||||||
|
icmpOut := out[ipv4.HeaderLen:]
|
||||||
|
icmpOut[0] = 3 // type (Destination unreachable)
|
||||||
|
icmpOut[1] = 3 // code (Port unreachable error)
|
||||||
|
icmpOut[2] = 0 // checksum
|
||||||
|
icmpOut[3] = 0 // .
|
||||||
|
icmpOut[4] = 0 // unused
|
||||||
|
icmpOut[5] = 0 // .
|
||||||
|
icmpOut[6] = 0 // .
|
||||||
|
icmpOut[7] = 0 // .
|
||||||
|
|
||||||
|
	// Copy original IP header and first 8 bytes as body
	copy(icmpOut[8:], packet[:packetLen])

	// Calculate checksum
	binary.BigEndian.PutUint16(icmpOut[2:], tcpipChecksum(icmpOut, 0))

	return out
}

func ipv4CreateRejectTCPPacket(packet []byte, out []byte) []byte {
	const tcpLen = 20

	ihl := int(packet[0]&0x0f) << 2
	outLen := ipv4.HeaderLen + tcpLen

	if len(packet) < ihl+tcpLen {
		// We need at least this many bytes for this to be a valid packet
		return nil
	}
	if outLen > cap(out) {
		return nil
	}

	out = out[:outLen]

	ipHdr := out[0:ipv4.HeaderLen]
	ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2)    // version, ihl
	ipHdr[1] = 0                                          // DSCP, ECN
	binary.BigEndian.PutUint16(ipHdr[2:], uint16(outLen)) // Total Length
	ipHdr[4] = 0  // id
	ipHdr[5] = 0  // .
	ipHdr[6] = 0  // flags, fragment offset
	ipHdr[7] = 0  // .
	ipHdr[8] = 64 // TTL
	ipHdr[9] = 6  // protocol (tcp)
	ipHdr[10] = 0 // checksum
	ipHdr[11] = 0 // .

	// Swap dest / src IPs
	copy(ipHdr[12:16], packet[16:20])
	copy(ipHdr[16:20], packet[12:16])

	// Calculate checksum
	binary.BigEndian.PutUint16(ipHdr[10:], tcpipChecksum(ipHdr, 0))

	// TCP RST
	tcpIn := packet[ihl:]
	var ackSeq, seq uint32
	outFlags := byte(0b00000100) // RST

	// Set seq and ackSeq based on how iptables/netfilter does it in Linux:
	// - https://github.com/torvalds/linux/blob/v5.19/net/ipv4/netfilter/nf_reject_ipv4.c#L193-L221
	inAck := tcpIn[13]&0b00010000 != 0
	if inAck {
		seq = binary.BigEndian.Uint32(tcpIn[8:])
	} else {
		inSyn := uint32((tcpIn[13] & 0b00000010) >> 1)
		inFin := uint32(tcpIn[13] & 0b00000001)
		// seq from the packet + syn + fin + tcp segment length
		ackSeq = binary.BigEndian.Uint32(tcpIn[4:]) + inSyn + inFin + uint32(len(tcpIn)) - uint32(tcpIn[12]>>4)<<2
		outFlags |= 0b00010000 // ACK
	}

	tcpOut := out[ipv4.HeaderLen:]
	// Swap dest / src ports
	copy(tcpOut[0:2], tcpIn[2:4])
	copy(tcpOut[2:4], tcpIn[0:2])
	binary.BigEndian.PutUint32(tcpOut[4:], seq)
	binary.BigEndian.PutUint32(tcpOut[8:], ackSeq)
	tcpOut[12] = (tcpLen >> 2) << 4 // data offset, reserved, NS
	tcpOut[13] = outFlags           // CWR, ECE, URG, ACK, PSH, RST, SYN, FIN
	tcpOut[14] = 0                  // window size
	tcpOut[15] = 0                  // .
	tcpOut[16] = 0                  // checksum
	tcpOut[17] = 0                  // .
	tcpOut[18] = 0                  // URG Pointer
	tcpOut[19] = 0                  // .

	// Calculate checksum
	csum := ipv4PseudoheaderChecksum(ipHdr[12:16], ipHdr[16:20], 6, tcpLen)
	binary.BigEndian.PutUint16(tcpOut[16:], tcpipChecksum(tcpOut, csum))

	return out
}

func CreateICMPEchoResponse(packet, out []byte) []byte {
	// Return early if this is not a simple ICMP Echo Request
	//TODO: make constants out of these
	if !(len(packet) >= 28 && len(packet) <= 9001 && packet[0] == 0x45 && packet[9] == 0x01 && packet[20] == 0x08) {
		return nil
	}

	// We don't support fragmented packets
	if packet[7] != 0 || (packet[6]&0x2F != 0) {
		return nil
	}

	out = out[:len(packet)]

	copy(out, packet)

	// Swap dest / src IPs and recalculate checksum
	ipv4 := out[0:20]
	copy(ipv4[12:16], packet[16:20])
	copy(ipv4[16:20], packet[12:16])
	ipv4[10] = 0
	ipv4[11] = 0
	binary.BigEndian.PutUint16(ipv4[10:], tcpipChecksum(ipv4, 0))

	// Change type to ICMP Echo Reply and recalculate checksum
	icmp := out[20:]
	icmp[0] = 0
	icmp[2] = 0
	icmp[3] = 0
	binary.BigEndian.PutUint16(icmp[2:], tcpipChecksum(icmp, 0))

	return out
}

// calculates the TCP/IP checksum defined in rfc1071. The passed-in
// csum is any initial checksum data that's already been computed.
//
// based on:
// - https://github.com/google/gopacket/blob/v1.1.19/layers/tcpip.go#L50-L70
func tcpipChecksum(data []byte, csum uint32) uint16 {
	// to handle odd lengths, we loop to length - 1, incrementing by 2, then
	// handle the last byte specifically by checking against the original
	// length.
	length := len(data) - 1
	for i := 0; i < length; i += 2 {
		// For our test packet, doing this manually is about 25% faster
		// (740 ns vs. 1000ns) than doing it by calling binary.BigEndian.Uint16.
		csum += uint32(data[i]) << 8
		csum += uint32(data[i+1])
	}
	if len(data)%2 == 1 {
		csum += uint32(data[length]) << 8
	}
	for csum > 0xffff {
		csum = (csum >> 16) + (csum & 0xffff)
	}
	return ^uint16(csum)
}

// based on:
// - https://github.com/google/gopacket/blob/v1.1.19/layers/tcpip.go#L26-L35
func ipv4PseudoheaderChecksum(src, dst []byte, proto, length uint32) (csum uint32) {
	csum += (uint32(src[0]) + uint32(src[2])) << 8
	csum += uint32(src[1]) + uint32(src[3])
	csum += (uint32(dst[0]) + uint32(dst[2])) << 8
	csum += uint32(dst[1]) + uint32(dst[3])
	csum += proto
	csum += length & 0xffff
	csum += length >> 16
	return csum
}
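Note: the two helpers above compose into a full TCP checksum the same way ipv4CreateRejectTCPPacket uses them: the pseudo-header sum seeds the RFC 1071 fold over the TCP segment. A minimal sketch of that composition, assuming it lives alongside the helpers in the same package (the function name is illustrative, not part of this diff):

// tcpChecksumSketch shows how the checksum helpers compose. src and dst are
// 4-byte IPv4 addresses; segment is the TCP header plus payload with its
// checksum field (bytes 16-17) already zeroed.
func tcpChecksumSketch(src, dst, segment []byte) uint16 {
	// Seed with the IPv4 pseudo-header: src, dst, protocol 6 (TCP) and length.
	seed := ipv4PseudoheaderChecksum(src, dst, 6, uint32(len(segment)))
	// Fold the segment itself on top of the seed per RFC 1071.
	return tcpipChecksum(segment, seed)
}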
73  iputil/packet_test.go  Normal file
@@ -0,0 +1,73 @@
package iputil

import (
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
	"golang.org/x/net/ipv4"
)

func Test_CreateRejectPacket(t *testing.T) {
	h := ipv4.Header{
		Len:      20,
		Src:      net.IPv4(10, 0, 0, 1),
		Dst:      net.IPv4(10, 0, 0, 2),
		Protocol: 1, // ICMP
	}

	b, err := h.Marshal()
	if err != nil {
		t.Fatalf("h.Marhshal: %v", err)
	}
	b = append(b, []byte{0, 3, 0, 4}...)

	expectedLen := ipv4.HeaderLen + 8 + h.Len + 4
	out := make([]byte, expectedLen)
	rejectPacket := CreateRejectPacket(b, out)
	assert.NotNil(t, rejectPacket)
	assert.Len(t, rejectPacket, expectedLen)

	// ICMP with max header len
	h = ipv4.Header{
		Len:      60,
		Src:      net.IPv4(10, 0, 0, 1),
		Dst:      net.IPv4(10, 0, 0, 2),
		Protocol: 1, // ICMP
		Options:  make([]byte, 40),
	}

	b, err = h.Marshal()
	if err != nil {
		t.Fatalf("h.Marhshal: %v", err)
	}
	b = append(b, []byte{0, 3, 0, 4, 0, 0, 0, 0}...)

	expectedLen = MaxRejectPacketSize
	out = make([]byte, MaxRejectPacketSize)
	rejectPacket = CreateRejectPacket(b, out)
	assert.NotNil(t, rejectPacket)
	assert.Len(t, rejectPacket, expectedLen)

	// TCP with max header len
	h = ipv4.Header{
		Len:      60,
		Src:      net.IPv4(10, 0, 0, 1),
		Dst:      net.IPv4(10, 0, 0, 2),
		Protocol: 6, // TCP
		Options:  make([]byte, 40),
	}

	b, err = h.Marshal()
	if err != nil {
		t.Fatalf("h.Marhshal: %v", err)
	}
	b = append(b, []byte{0, 3, 0, 4}...)
	b = append(b, make([]byte, 16)...)

	expectedLen = ipv4.HeaderLen + 20
	out = make([]byte, expectedLen)
	rejectPacket = CreateRejectPacket(b, out)
	assert.NotNil(t, rejectPacket)
	assert.Len(t, rejectPacket, expectedLen)
}
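Note: the test sizes its buffers from MaxRejectPacketSize for the ICMP cases and from ipv4.HeaderLen + 20 for the TCP RST case. A caller that does not know in advance which kind of reject will be produced can simply allocate the maximum. A small usage sketch, assuming it sits in the same package (the wrapper name is illustrative):

// buildReject allocates the worst-case buffer and lets CreateRejectPacket
// decide what fits; it returns nil when the packet cannot be rejected or the
// buffer is too small.
func buildReject(packet []byte) []byte {
	out := make([]byte, MaxRejectPacketSize)
	return CreateRejectPacket(packet, out)
}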
478  lighthouse.go
@@ -6,13 +6,14 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"net/netip"
 	"sync"
 	"sync/atomic"
 	"time"
-	"unsafe"
 
 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
+	"github.com/slackhq/nebula/cidr"
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
 	"github.com/slackhq/nebula/iputil"
@@ -33,11 +34,12 @@ type netIpAndPort struct {
 type LightHouse struct {
 	//TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
 	sync.RWMutex //Because we concurrently read and write to our maps
+	ctx          context.Context
 	amLighthouse bool
 	myVpnIp      iputil.VpnIp
 	myVpnZeros   iputil.VpnIp
 	myVpnNet     *net.IPNet
-	punchConn    *udp.Conn
+	punchConn    udp.Conn
 	punchy       *Punchy
 
 	// Local cache of answers from light houses
@@ -49,29 +51,32 @@ type LightHouse struct {
 	// respond with.
 	// - When we are not a lighthouse, this filters which addresses we accept
 	// from lighthouses.
-	atomicRemoteAllowList *RemoteAllowList
+	remoteAllowList atomic.Pointer[RemoteAllowList]
 
 	// filters local addresses that we advertise to lighthouses
-	atomicLocalAllowList *LocalAllowList
+	localAllowList atomic.Pointer[LocalAllowList]
 
 	// used to trigger the HandshakeManager when we receive HostQueryReply
 	handshakeTrigger chan<- iputil.VpnIp
 
-	// atomicStaticList exists to avoid having a bool in each addrMap entry
+	// staticList exists to avoid having a bool in each addrMap entry
 	// since static should be rare
-	atomicStaticList  map[iputil.VpnIp]struct{}
-	atomicLighthouses map[iputil.VpnIp]struct{}
+	staticList  atomic.Pointer[map[iputil.VpnIp]struct{}]
+	lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]
 
-	atomicInterval  int64
+	interval     atomic.Int64
 	updateCancel context.CancelFunc
-	updateParentCtx context.Context
-	updateUdp       udp.EncWriter
-	nebulaPort      uint32 // 32 bits because protobuf does not have a uint16
+	ifce         EncWriter
+	nebulaPort   uint32 // 32 bits because protobuf does not have a uint16
 
-	atomicAdvertiseAddrs []netIpAndPort
+	advertiseAddrs atomic.Pointer[[]netIpAndPort]
 
 	// IP's of relays that can be used by peers to access me
-	atomicRelaysForMe []iputil.VpnIp
+	relaysForMe atomic.Pointer[[]iputil.VpnIp]
+
+	queryChan chan iputil.VpnIp
+
+	calculatedRemotes atomic.Pointer[cidr.Tree4[[]*calculatedRemote]] // Maps VpnIp to []*calculatedRemote
 
 	metrics           *MessageMetrics
 	metricHolepunchTx metrics.Counter
@@ -80,7 +85,7 @@ type LightHouse struct {
 
 // NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
 // addrMap should be nil unless this is during a config reload
-func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
+func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc udp.Conn, p *Punchy) (*LightHouse, error) {
 	amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
 	nebulaPort := uint32(c.GetInt("listen.port", 0))
 	if amLighthouse && nebulaPort == 0 {
@@ -98,18 +103,22 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet,
 
 	ones, _ := myVpnNet.Mask.Size()
 	h := LightHouse{
-		amLighthouse:      amLighthouse,
-		myVpnIp:           iputil.Ip2VpnIp(myVpnNet.IP),
-		myVpnZeros:        iputil.VpnIp(32 - ones),
-		myVpnNet:          myVpnNet,
-		addrMap:           make(map[iputil.VpnIp]*RemoteList),
-		nebulaPort:        nebulaPort,
-		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
-		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
-		punchConn:         pc,
-		punchy:            p,
+		ctx:          ctx,
+		amLighthouse: amLighthouse,
+		myVpnIp:      iputil.Ip2VpnIp(myVpnNet.IP),
+		myVpnZeros:   iputil.VpnIp(32 - ones),
+		myVpnNet:     myVpnNet,
+		addrMap:      make(map[iputil.VpnIp]*RemoteList),
+		nebulaPort:   nebulaPort,
+		punchConn:    pc,
+		punchy:       p,
+		queryChan:    make(chan iputil.VpnIp, c.GetUint32("handshakes.query_buffer", 64)),
 		l:            l,
 	}
+	lighthouses := make(map[iputil.VpnIp]struct{})
+	h.lighthouses.Store(&lighthouses)
+	staticList := make(map[iputil.VpnIp]struct{})
+	h.staticList.Store(&staticList)
 
 	if c.GetBool("stats.lighthouse_metrics", false) {
 		h.metrics = newLighthouseMetrics()
@@ -126,42 +135,48 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet,
 	c.RegisterReloadCallback(func(c *config.C) {
 		err := h.reload(c, false)
 		switch v := err.(type) {
-		case util.ContextualError:
+		case *util.ContextualError:
 			v.Log(l)
 		case error:
 			l.WithError(err).Error("failed to reload lighthouse")
 		}
 	})
 
+	h.startQueryWorker()
+
 	return &h, nil
 }
 
 func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
-	return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList))))
+	return *lh.staticList.Load()
 }
 
 func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
-	return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses))))
+	return *lh.lighthouses.Load()
 }
 
 func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
-	return (*RemoteAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList))))
+	return lh.remoteAllowList.Load()
 }
 
 func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
-	return (*LocalAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList))))
+	return lh.localAllowList.Load()
 }
 
 func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort {
-	return *(*[]netIpAndPort)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs))))
+	return *lh.advertiseAddrs.Load()
 }
 
 func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
-	return *(*[]iputil.VpnIp)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe))))
+	return *lh.relaysForMe.Load()
+}
+
+func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4[[]*calculatedRemote] {
+	return lh.calculatedRemotes.Load()
 }
 
 func (lh *LightHouse) GetUpdateInterval() int64 {
-	return atomic.LoadInt64(&lh.atomicInterval)
+	return lh.interval.Load()
 }
 
 func (lh *LightHouse) reload(c *config.C, initial bool) error {
@@ -188,7 +203,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 		advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort})
 	}
 
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)), unsafe.Pointer(&advAddrs))
+	lh.advertiseAddrs.Store(&advAddrs)
 
 	if !initial {
 		lh.l.Info("lighthouse.advertise_addrs has changed")
@@ -196,17 +211,17 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 	}
 
 	if initial || c.HasChanged("lighthouse.interval") {
-		atomic.StoreInt64(&lh.atomicInterval, int64(c.GetInt("lighthouse.interval", 10)))
+		lh.interval.Store(int64(c.GetInt("lighthouse.interval", 10)))
 
 		if !initial {
-			lh.l.Infof("lighthouse.interval changed to %v", lh.atomicInterval)
+			lh.l.Infof("lighthouse.interval changed to %v", lh.interval.Load())
 
 			if lh.updateCancel != nil {
 				// May not always have a running routine
 				lh.updateCancel()
 			}
 
-			lh.LhUpdateWorker(lh.updateParentCtx, lh.updateUdp)
+			lh.StartUpdateWorker()
 		}
 	}
 
@@ -216,7 +231,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 			return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
 		}
 
-		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)), unsafe.Pointer(ral))
+		lh.remoteAllowList.Store(ral)
 		if !initial {
 			//TODO: a diff will be annoyingly difficult
 			lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed")
@@ -229,27 +244,62 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 			return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
 		}
 
-		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)), unsafe.Pointer(lal))
+		lh.localAllowList.Store(lal)
 		if !initial {
 			//TODO: a diff will be annoyingly difficult
 			lh.l.Info("lighthouse.local_allow_list has changed")
 		}
 	}
 
+	if initial || c.HasChanged("lighthouse.calculated_remotes") {
+		cr, err := NewCalculatedRemotesFromConfig(c, "lighthouse.calculated_remotes")
+		if err != nil {
+			return util.NewContextualError("Invalid lighthouse.calculated_remotes", nil, err)
+		}
+
+		lh.calculatedRemotes.Store(cr)
+		if !initial {
+			//TODO: a diff will be annoyingly difficult
+			lh.l.Info("lighthouse.calculated_remotes has changed")
+		}
+	}
+
 	//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
-	if initial || c.HasChanged("static_host_map") {
+	if initial || c.HasChanged("static_host_map") || c.HasChanged("static_map.cadence") || c.HasChanged("static_map.network") || c.HasChanged("static_map.lookup_timeout") {
+		// Clean up. Entries still in the static_host_map will be re-built.
+		// Entries no longer present must have their (possible) background DNS goroutines stopped.
+		if existingStaticList := lh.staticList.Load(); existingStaticList != nil {
+			lh.RLock()
+			for staticVpnIp := range *existingStaticList {
+				if am, ok := lh.addrMap[staticVpnIp]; ok && am != nil {
+					am.hr.Cancel()
+				}
+			}
+			lh.RUnlock()
+		}
+		// Build a new list based on current config.
 		staticList := make(map[iputil.VpnIp]struct{})
 		err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
 		if err != nil {
 			return err
 		}
 
-		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)), unsafe.Pointer(&staticList))
+		lh.staticList.Store(&staticList)
 		if !initial {
 			//TODO: we should remove any remote list entries for static hosts that were removed/modified?
-			lh.l.Info("static_host_map has changed")
+			if c.HasChanged("static_host_map") {
+				lh.l.Info("static_host_map has changed")
+			}
+			if c.HasChanged("static_map.cadence") {
+				lh.l.Info("static_map.cadence has changed")
+			}
+			if c.HasChanged("static_map.network") {
+				lh.l.Info("static_map.network has changed")
+			}
+			if c.HasChanged("static_map.lookup_timeout") {
+				lh.l.Info("static_map.lookup_timeout has changed")
			}
 		}

 	}
 
 	if initial || c.HasChanged("lighthouse.hosts") {
@@ -259,7 +309,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 			return err
 		}
 
-		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)), unsafe.Pointer(&lhMap))
+		lh.lighthouses.Store(&lhMap)
 		if !initial {
 			//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
 			lh.l.Info("lighthouse.hosts has changed")
@@ -274,18 +324,18 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 			lh.l.Info("Ignoring relays from config because am_relay is true")
 		}
 		relaysForMe := []iputil.VpnIp{}
-		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe))
+		lh.relaysForMe.Store(&relaysForMe)
 	case false:
 		relaysForMe := []iputil.VpnIp{}
 		for _, v := range c.GetStringSlice("relay.relays", nil) {
-			lh.l.WithField("RelayIP", v).Info("Read relay from config")
+			lh.l.WithField("relay", v).Info("Read relay from config")
 
 			configRIP := net.ParseIP(v)
 			if configRIP != nil {
 				relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP))
 			}
 		}
-		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe))
+		lh.relaysForMe.Store(&relaysForMe)
 	}
 }
 
@@ -323,7 +373,48 @@ func (lh *LightHouse) parseLighthouses(c *config.C, tunCidr *net.IPNet, lhMap ma
 	return nil
 }
 
+func getStaticMapCadence(c *config.C) (time.Duration, error) {
+	cadence := c.GetString("static_map.cadence", "30s")
+	d, err := time.ParseDuration(cadence)
+	if err != nil {
+		return 0, err
+	}
+	return d, nil
+}
+
+func getStaticMapLookupTimeout(c *config.C) (time.Duration, error) {
+	lookupTimeout := c.GetString("static_map.lookup_timeout", "250ms")
+	d, err := time.ParseDuration(lookupTimeout)
+	if err != nil {
+		return 0, err
+	}
+	return d, nil
+}
+
+func getStaticMapNetwork(c *config.C) (string, error) {
+	network := c.GetString("static_map.network", "ip4")
+	if network != "ip" && network != "ip4" && network != "ip6" {
+		return "", fmt.Errorf("static_map.network must be one of ip, ip4, or ip6")
+	}
+	return network, nil
+}
+
 func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
+	d, err := getStaticMapCadence(c)
+	if err != nil {
+		return err
+	}
+
+	network, err := getStaticMapNetwork(c)
+	if err != nil {
+		return err
+	}
+
+	lookup_timeout, err := getStaticMapLookupTimeout(c)
+	if err != nil {
+		return err
+	}
+
 	shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
 	i := 0
 
@@ -339,21 +430,17 @@ func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList
 
 		vpnIp := iputil.Ip2VpnIp(rip)
 		vals, ok := v.([]interface{})
-		if ok {
-			for _, v := range vals {
-				ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
-				if err != nil {
-					return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
-				}
-				lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
-			}
+		if !ok {
+			vals = []interface{}{v}
+		}
+		remoteAddrs := []string{}
+		for _, v := range vals {
+			remoteAddrs = append(remoteAddrs, fmt.Sprintf("%v", v))
+		}
 
-		} else {
-			ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
-			if err != nil {
-				return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
-			}
-			lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
+		err := lh.addStaticRemotes(i, d, network, lookup_timeout, vpnIp, remoteAddrs, staticList)
+		if err != nil {
+			return err
 		}
 		i++
 	}
@@ -361,9 +448,9 @@ func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList
 	return nil
 }
 
-func (lh *LightHouse) Query(ip iputil.VpnIp, f udp.EncWriter) *RemoteList {
+func (lh *LightHouse) Query(ip iputil.VpnIp) *RemoteList {
 	if !lh.IsLighthouseIP(ip) {
-		lh.QueryServer(ip, f)
+		lh.QueryServer(ip)
 	}
 	lh.RLock()
 	if v, ok := lh.addrMap[ip]; ok {
@@ -374,30 +461,14 @@ func (lh *LightHouse) Query(ip iputil.VpnIp, f udp.EncWriter) *RemoteList {
 	return nil
 }
 
-// This is asynchronous so no reply should be expected
-func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f udp.EncWriter) {
-	if lh.amLighthouse {
+// QueryServer is asynchronous so no reply should be expected
+func (lh *LightHouse) QueryServer(ip iputil.VpnIp) {
+	// Don't put lighthouse ips in the query channel because we can't query lighthouses about lighthouses
+	if lh.amLighthouse || lh.IsLighthouseIP(ip) {
 		return
 	}
 
-	if lh.IsLighthouseIP(ip) {
-		return
-	}
-
-	// Send a query to the lighthouses and hope for the best next time
-	query, err := NewLhQueryByInt(ip).Marshal()
-	if err != nil {
-		lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
-		return
-	}
-
-	lighthouses := lh.GetLighthouses()
-	lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
-	nb := make([]byte, 12, 12)
-	out := make([]byte, mtu)
-	for n := range lighthouses {
-		f.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
-	}
+	lh.queryChan <- ip
 }
 
 func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
@@ -460,43 +531,122 @@ func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
 // AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
 // We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
 // And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
-//NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
-func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) {
+// NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
+func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, timeout time.Duration, vpnIp iputil.VpnIp, toAddrs []string, staticList map[iputil.VpnIp]struct{}) error {
+	lh.Lock()
+	am := lh.unlockedGetRemoteList(vpnIp)
+	am.Lock()
+	defer am.Unlock()
+	ctx := lh.ctx
+	lh.Unlock()
+
+	hr, err := NewHostnameResults(ctx, lh.l, d, network, timeout, toAddrs, func() {
+		// This callback runs whenever the DNS hostname resolver finds a different set of IP's
+		// in its resolution for hostnames.
+		am.Lock()
+		defer am.Unlock()
+		am.shouldRebuild = true
+	})
+	if err != nil {
+		return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
+	}
+	am.unlockedSetHostnamesResults(hr)
+
+	for _, addrPort := range hr.GetIPs() {
+
+		switch {
+		case addrPort.Addr().Is4():
+			to := NewIp4AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
+			if !lh.unlockedShouldAddV4(vpnIp, to) {
+				continue
+			}
+			am.unlockedPrependV4(lh.myVpnIp, to)
+		case addrPort.Addr().Is6():
+			to := NewIp6AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
+			if !lh.unlockedShouldAddV6(vpnIp, to) {
+				continue
+			}
+			am.unlockedPrependV6(lh.myVpnIp, to)
+		}
+	}
+
+	// Mark it as static in the caller provided map
+	staticList[vpnIp] = struct{}{}
+	return nil
+}
+
+// addCalculatedRemotes adds any calculated remotes based on the
+// lighthouse.calculated_remotes configuration. It returns true if any
+// calculated remotes were added
+func (lh *LightHouse) addCalculatedRemotes(vpnIp iputil.VpnIp) bool {
+	tree := lh.getCalculatedRemotes()
+	if tree == nil {
+		return false
+	}
+	ok, calculatedRemotes := tree.MostSpecificContains(vpnIp)
+	if !ok {
+		return false
+	}
+
+	var calculated []*Ip4AndPort
+	for _, cr := range calculatedRemotes {
+		c := cr.Apply(vpnIp)
+		if c != nil {
+			calculated = append(calculated, c)
+		}
+	}
+
 	lh.Lock()
 	am := lh.unlockedGetRemoteList(vpnIp)
 	am.Lock()
 	defer am.Unlock()
 	lh.Unlock()
 
-	if ipv4 := toAddr.IP.To4(); ipv4 != nil {
-		to := NewIp4AndPort(ipv4, uint32(toAddr.Port))
-		if !lh.unlockedShouldAddV4(vpnIp, to) {
-			return
-		}
-		am.unlockedPrependV4(lh.myVpnIp, to)
-
-	} else {
-		to := NewIp6AndPort(toAddr.IP, uint32(toAddr.Port))
-		if !lh.unlockedShouldAddV6(vpnIp, to) {
-			return
-		}
-		am.unlockedPrependV6(lh.myVpnIp, to)
-	}
-
-	// Mark it as static in the caller provided map
-	staticList[vpnIp] = struct{}{}
+	am.unlockedSetV4(lh.myVpnIp, vpnIp, calculated, lh.unlockedShouldAddV4)
+
+	return len(calculated) > 0
 }
 
 // unlockedGetRemoteList assumes you have the lh lock
 func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
 	am, ok := lh.addrMap[vpnIp]
 	if !ok {
-		am = NewRemoteList()
+		am = NewRemoteList(func(a netip.Addr) bool { return lh.shouldAdd(vpnIp, a) })
 		lh.addrMap[vpnIp] = am
 	}
 	return am
 }
 
+func (lh *LightHouse) shouldAdd(vpnIp iputil.VpnIp, to netip.Addr) bool {
+	switch {
+	case to.Is4():
+		ipBytes := to.As4()
+		ip := iputil.Ip2VpnIp(ipBytes[:])
+		allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, ip)
+		if lh.l.Level >= logrus.TraceLevel {
+			lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
+		}
+		if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, ip) {
+			return false
+		}
+	case to.Is6():
+		ipBytes := to.As16()
+
+		hi := binary.BigEndian.Uint64(ipBytes[:8])
+		lo := binary.BigEndian.Uint64(ipBytes[8:])
+		allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, hi, lo)
+		if lh.l.Level >= logrus.TraceLevel {
+			lh.l.WithField("remoteIp", to).WithField("allow", allow).Trace("remoteAllowList.Allow")
+		}
+
+		// We don't check our vpn network here because nebula does not support ipv6 on the inside
+		if !allow {
+			return false
+		}
+	}
+	return true
+}
+
 // unlockedShouldAddV4 checks if to is allowed by our allow list
 func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
 	allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
@@ -555,6 +705,14 @@ func NewIp4AndPort(ip net.IP, port uint32) *Ip4AndPort {
 	return &ipp
 }
 
+func NewIp4AndPortFromNetIP(ip netip.Addr, port uint16) *Ip4AndPort {
+	v4Addr := ip.As4()
+	return &Ip4AndPort{
+		Ip:   binary.BigEndian.Uint32(v4Addr[:]),
+		Port: uint32(port),
+	}
+}
+
 func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
 	return &Ip6AndPort{
 		Hi: binary.BigEndian.Uint64(ip[:8]),
@@ -563,6 +721,14 @@ func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
 	}
 }
 
+func NewIp6AndPortFromNetIP(ip netip.Addr, port uint16) *Ip6AndPort {
+	ip6Addr := ip.As16()
+	return &Ip6AndPort{
+		Hi:   binary.BigEndian.Uint64(ip6Addr[:8]),
+		Lo:   binary.BigEndian.Uint64(ip6Addr[8:]),
+		Port: uint32(port),
+	}
+}
 func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr {
 	ip := ipp.Ip
 	return udp.NewAddr(
@@ -575,33 +741,73 @@ func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
 	return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
 }
 
-func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) {
-	lh.updateParentCtx = ctx
-	lh.updateUdp = f
+func (lh *LightHouse) startQueryWorker() {
+	if lh.amLighthouse {
+		return
+	}
+
+	go func() {
+		nb := make([]byte, 12, 12)
+		out := make([]byte, mtu)
+
+		for {
+			select {
+			case <-lh.ctx.Done():
+				return
+			case ip := <-lh.queryChan:
+				lh.innerQueryServer(ip, nb, out)
+			}
+		}
+	}()
+}
+
+func (lh *LightHouse) innerQueryServer(ip iputil.VpnIp, nb, out []byte) {
+	if lh.IsLighthouseIP(ip) {
+		return
+	}
+
+	// Send a query to the lighthouses and hope for the best next time
+	query, err := NewLhQueryByInt(ip).Marshal()
+	if err != nil {
+		lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
+		return
+	}
+
+	lighthouses := lh.GetLighthouses()
+	lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
+
+	for n := range lighthouses {
+		lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
+	}
+}
+
+func (lh *LightHouse) StartUpdateWorker() {
 	interval := lh.GetUpdateInterval()
 	if lh.amLighthouse || interval == 0 {
 		return
 	}
 
 	clockSource := time.NewTicker(time.Second * time.Duration(interval))
-	updateCtx, cancel := context.WithCancel(ctx)
+	updateCtx, cancel := context.WithCancel(lh.ctx)
 	lh.updateCancel = cancel
-	defer clockSource.Stop()
 
-	for {
-		lh.SendUpdate(f)
+	go func() {
+		defer clockSource.Stop()
 
-		select {
-		case <-updateCtx.Done():
-			return
-		case <-clockSource.C:
-			continue
+		for {
+			lh.SendUpdate()
+
+			select {
+			case <-updateCtx.Done():
+				return
+			case <-clockSource.C:
+				continue
+			}
 		}
-	}
+	}()
 }
 
-func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
+func (lh *LightHouse) SendUpdate() {
 	var v4 []*Ip4AndPort
 	var v6 []*Ip6AndPort
 
@@ -654,7 +860,7 @@ func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
 	}
 
 	for vpnIp := range lighthouses {
-		f.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
+		lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
 	}
 }
 
@@ -706,7 +912,13 @@ func (lhh *LightHouseHandler) resetMeta() *NebulaMeta {
 	return lhh.meta
 }
 
-func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w udp.EncWriter) {
+func lhHandleRequest(lhh *LightHouseHandler, f *Interface) udp.LightHouseHandlerFunc {
+	return func(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte) {
+		lhh.HandleRequest(rAddr, vpnIp, p, f)
+	}
+}
+
+func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w EncWriter) {
 	n := lhh.resetMeta()
 	err := n.Unmarshal(p)
 	if err != nil {
@@ -733,15 +945,18 @@ func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp,
 		lhh.handleHostQueryReply(n, vpnIp)
 
 	case NebulaMeta_HostUpdateNotification:
-		lhh.handleHostUpdateNotification(n, vpnIp)
+		lhh.handleHostUpdateNotification(n, vpnIp, w)
 
 	case NebulaMeta_HostMovedNotification:
 	case NebulaMeta_HostPunchNotification:
 		lhh.handleHostPunchNotification(n, vpnIp, w)
+
+	case NebulaMeta_HostUpdateNotificationAck:
+		// noop
 	}
 }
 
-func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w udp.EncWriter) {
+func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w EncWriter) {
 	// Exit if we don't answer queries
 	if !lhh.lh.amLighthouse {
 		if lhh.l.Level >= logrus.DebugLevel {
@@ -846,7 +1061,7 @@ func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp iputil.V
 	}
 }
 
-func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp) {
+func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
 	if !lhh.lh.amLighthouse {
 		if lhh.l.Level >= logrus.DebugLevel {
 			lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
@@ -872,9 +1087,22 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp
 	am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
 	am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
 	am.Unlock()
+
+	n = lhh.resetMeta()
+	n.Type = NebulaMeta_HostUpdateNotificationAck
+	n.Details.VpnIp = uint32(vpnIp)
+	ln, err := n.MarshalTo(lhh.pb)
+
+	if err != nil {
+		lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host update ack")
+		return
+	}
+
+	lhh.lh.metricTx(NebulaMeta_HostUpdateNotificationAck, 1)
+	w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])
 }
 
-func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w udp.EncWriter) {
+func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
 	if !lhh.lh.IsLighthouseIP(vpnIp) {
 		return
 	}
@@ -911,7 +1139,7 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp i
 	if lhh.lh.punchy.GetRespond() {
 		queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
 		go func() {
-			time.Sleep(time.Second * 5)
+			time.Sleep(lhh.lh.punchy.GetRespondDelay())
 			if lhh.l.Level >= logrus.DebugLevel {
 				lhh.l.Debugf("Sending a nebula test packet to vpn ip %s", queryVpnIp)
 			}
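Note: every atomicFoo field in the lighthouse.go diff follows the same mechanical conversion: a raw pointer read through unsafe.Pointer casts becomes a typed atomic.Pointer[T] (or atomic.Int64) from Go 1.19's sync/atomic, so each getter collapses to a single Load. A standalone sketch of the pattern, using an illustrative allowList type rather than the real nebula ones:

package main

import (
	"fmt"
	"sync/atomic"
)

// allowList is an illustrative stand-in for a config-reloadable value.
type allowList struct{ rules []string }

type holder struct {
	// Typed atomic pointer replaces a raw field plus unsafe.Pointer casts.
	allow atomic.Pointer[allowList]
}

func (h *holder) Get() *allowList  { return h.allow.Load() }
func (h *holder) Set(a *allowList) { h.allow.Store(a) }

func main() {
	h := &holder{}
	h.Set(&allowList{rules: []string{"10.0.0.0/8"}})
	fmt.Println(h.Get().rules) // [10.0.0.0/8]
}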
@@ -1,6 +1,7 @@
 package nebula
 
 import (
+	"context"
 	"fmt"
 	"net"
 	"testing"
@@ -11,6 +12,7 @@ import (
 	"github.com/slackhq/nebula/test"
 	"github.com/slackhq/nebula/udp"
 	"github.com/stretchr/testify/assert"
+	"gopkg.in/yaml.v2"
 )
 
 //TODO: Add a test to ensure udpAddr is copied and not reused
@@ -53,30 +55,59 @@ func Test_lhStaticMapping(t *testing.T) {
 	c := config.NewC(l)
 	c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
 	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
-	_, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	_, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
 	assert.Nil(t, err)
 
 	lh2 := "10.128.0.3"
 	c = config.NewC(l)
 	c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
 	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
-	_, err = NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	_, err = NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
 	assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
 }
 
+func TestReloadLighthouseInterval(t *testing.T) {
+	l := test.NewLogger()
+	_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
+	lh1 := "10.128.0.2"
+
+	c := config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{
+		"hosts":    []interface{}{lh1},
+		"interval": "1s",
+	}
+
+	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
+	lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
+	assert.NoError(t, err)
+	lh.ifce = &mockEncWriter{}
+
+	// The first one routine is kicked off by main.go currently, lets make sure that one dies
+	c.ReloadConfigString("lighthouse:\n interval: 5")
+	assert.Equal(t, int64(5), lh.interval.Load())
+
+	// Subsequent calls are killed off by the LightHouse.Reload function
+	c.ReloadConfigString("lighthouse:\n interval: 10")
+	assert.Equal(t, int64(10), lh.interval.Load())
+
+	// If this completes then nothing is stealing our reload routine
+	c.ReloadConfigString("lighthouse:\n interval: 11")
+	assert.Equal(t, int64(11), lh.interval.Load())
+}
+
 func BenchmarkLighthouseHandleRequest(b *testing.B) {
 	l := test.NewLogger()
 	_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
 
 	c := config.NewC(l)
-	lh, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
 	if !assert.NoError(b, err) {
 		b.Fatal()
 	}
 
 	hAddr := udp.NewAddrFromString("4.5.6.7:12345")
 	hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
-	lh.addrMap[3] = NewRemoteList()
+	lh.addrMap[3] = NewRemoteList(nil)
 	lh.addrMap[3].unlockedSetV4(
 		3,
 		3,
@@ -89,7 +120,7 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
 
 	rAddr := udp.NewAddrFromString("1.2.2.3:12345")
 	rAddr2 := udp.NewAddrFromString("1.2.2.3:12346")
-	lh.addrMap[2] = NewRemoteList()
+	lh.addrMap[2] = NewRemoteList(nil)
 	lh.addrMap[2].unlockedSetV4(
 		3,
 		3,
@@ -162,7 +193,7 @@ func TestLighthouse_Memory(t *testing.T) {
 	c := config.NewC(l)
 	c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
 	c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
-	lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
+	lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
 	assert.NoError(t, err)
 	lhh := lh.NewRequestHandler()
 
@@ -238,11 +269,20 @@ func TestLighthouse_reload(t *testing.T) {
 	c := config.NewC(l)
 	c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
 	c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
-	lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
+	lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
 	assert.NoError(t, err)
 
-	c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}}
-	lh.reload(c, false)
+	nc := map[interface{}]interface{}{
+		"static_host_map": map[interface{}]interface{}{
+			"10.128.0.2": []interface{}{"1.1.1.1:4242"},
+		},
+	}
+	rc, err := yaml.Marshal(nc)
+	assert.NoError(t, err)
+	c.ReloadConfigString(string(rc))
+
+	err = lh.reload(c, false)
+	assert.NoError(t, err)
 }
 
 func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
@@ -372,11 +412,28 @@ type testEncWriter struct {
 	metaFilter *NebulaMeta_MessageType
 }
 
-func (tw *testEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) {
+func (tw *testEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
 }
 func (tw *testEncWriter) Handshake(vpnIp iputil.VpnIp) {
 }
 
+func (tw *testEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, _, _ []byte) {
+	msg := &NebulaMeta{}
+	err := msg.Unmarshal(p)
+	if tw.metaFilter == nil || msg.Type == *tw.metaFilter {
+		tw.lastReply = testLhReply{
+			nebType:    t,
+			nebSubType: st,
+			vpnIp:      hostinfo.vpnIp,
+			msg:        msg,
+		}
+	}
+
+	if err != nil {
+		panic(err)
+	}
+}
+
 func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
 	msg := &NebulaMeta{}
 	err := msg.Unmarshal(p)
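Note: testEncWriter is what lets these tests drive LightHouseHandler.HandleRequest without a network; it satisfies the EncWriter surface and records the last protobuf reply. A pared-down sketch of the same idea, assuming the header and iputil packages from this repository; the real EncWriter interface also requires SendVia, Handshake, and SendMessageToHostInfo, as the diff shows:

// recordingWriter is a minimal fake in the spirit of testEncWriter: it counts
// SendMessageToVpnIp calls instead of sending anything on the wire.
type recordingWriter struct {
	sent int
}

func (r *recordingWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
	r.sent++
}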
140  main.go
@@ -3,7 +3,6 @@ package nebula
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
@@ -19,7 +18,7 @@ import (
|
|||||||
|
|
||||||
type m map[string]interface{}
|
type m map[string]interface{}
|
||||||
|
|
||||||
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, tunFd *int) (retcon *Control, reterr error) {
|
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, deviceFactory overlay.DeviceFactory) (retcon *Control, reterr error) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
// Automatically cancel the context if Main returns an error, to signal all created goroutines to quit.
|
// Automatically cancel the context if Main returns an error, to signal all created goroutines to quit.
|
||||||
defer func() {
|
defer func() {
|
||||||
@@ -46,7 +45,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
|
|
||||||
err := configLogger(l, c)
|
err := configLogger(l, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, util.NewContextualError("Failed to configure the logger", nil, err)
|
return nil, util.ContextualizeIfNeeded("Failed to configure the logger", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.RegisterReloadCallback(func(c *config.C) {
|
c.RegisterReloadCallback(func(c *config.C) {
|
||||||
@@ -56,36 +55,31 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
         }
     })

-    caPool, err := loadCAFromConfig(l, c)
+    pki, err := NewPKIFromConfig(l, c)
     if err != nil {
-        //The errors coming out of loadCA are already nicely formatted
-        return nil, util.NewContextualError("Failed to load ca from config", nil, err)
+        return nil, util.ContextualizeIfNeeded("Failed to load PKI from config", err)
     }
-    l.WithField("fingerprints", caPool.GetFingerprints()).Debug("Trusted CA fingerprints")

-    cs, err := NewCertStateFromConfig(c)
-    if err != nil {
-        //The errors coming out of NewCertStateFromConfig are already nicely formatted
-        return nil, util.NewContextualError("Failed to load certificate from config", nil, err)
-    }
-    l.WithField("cert", cs.certificate).Debug("Client nebula certificate")
-
-    fw, err := NewFirewallFromConfig(l, cs.certificate, c)
+    certificate := pki.GetCertState().Certificate
+    fw, err := NewFirewallFromConfig(l, certificate, c)
     if err != nil {
-        return nil, util.NewContextualError("Error while loading firewall rules", nil, err)
+        return nil, util.ContextualizeIfNeeded("Error while loading firewall rules", err)
     }
-    l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started")
+    l.WithField("firewallHashes", fw.GetRuleHashes()).Info("Firewall started")

     // TODO: make sure mask is 4 bytes
-    tunCidr := cs.certificate.Details.Ips[0]
+    tunCidr := certificate.Details.Ips[0]

     ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
+    if err != nil {
+        return nil, util.ContextualizeIfNeeded("Error while creating SSH server", err)
+    }
     wireSSHReload(l, ssh, c)
     var sshStart func()
     if c.GetBool("sshd.enabled", false) {
         sshStart, err = configSSH(l, ssh, c)
         if err != nil {
-            return nil, util.NewContextualError("Error while configuring the sshd", nil, err)
+            return nil, util.ContextualizeIfNeeded("Error while configuring the sshd", err)
         }
     }

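The old code loaded the CA pool and the node certificate separately; the new code asks a single `pki` object for them (`NewPKIFromConfig`, then `pki.GetCertState().Certificate`). The type below is a hypothetical sketch of such a wrapper, assuming the point of bundling them is to let a config reload install fresh values without locking readers; apart from the two calls visible in the hunk, every name here is invented for illustration.

package main

import (
    "fmt"
    "sync/atomic"
)

// Placeholder certificate types for the sketch.
type Certificate struct{ Name string }
type CAPool struct{ Fingerprints []string }

// CertState pairs the local certificate with whatever derived data the
// interface needs; the diff only shows its Certificate field being read.
type CertState struct {
    Certificate *Certificate
}

// PKI is a hypothetical wrapper: one object owning the CA pool and the local
// cert state, each behind an atomic pointer so a reload can swap in a new
// value while readers keep calling the getters.
type PKI struct {
    cs     atomic.Pointer[CertState]
    caPool atomic.Pointer[CAPool]
}

func (p *PKI) GetCertState() *CertState { return p.cs.Load() }
func (p *PKI) GetCAPool() *CAPool       { return p.caPool.Load() }

func main() {
    p := &PKI{}
    p.cs.Store(&CertState{Certificate: &Certificate{Name: "host1"}})
    p.caPool.Store(&CAPool{Fingerprints: []string{"ab:cd"}})

    // Mirrors the access pattern in the diff: certificate := pki.GetCertState().Certificate
    certificate := p.GetCertState().Certificate
    fmt.Println(certificate.Name, p.GetCAPool().Fingerprints)
}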
@@ -134,9 +128,13 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
     if !configTest {
         c.CatchHUP(ctx)

-        tun, err = overlay.NewDeviceFromConfig(c, l, tunCidr, tunFd, routines)
+        if deviceFactory == nil {
+            deviceFactory = overlay.NewDeviceFromConfig
+        }
+
+        tun, err = deviceFactory(c, l, tunCidr, routines)
         if err != nil {
-            return nil, util.NewContextualError("Failed to get a tun/tap device", nil, err)
+            return nil, util.ContextualizeIfNeeded("Failed to get a tun/tap device", err)
         }

         defer func() {
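The `tunFd *int` parameter from the old signature is gone; callers now pass an `overlay.DeviceFactory`, and `Main` falls back to `overlay.NewDeviceFromConfig` when it is nil. From the call `deviceFactory(c, l, tunCidr, routines)` the factory appears to take the config, logger, tun CIDR and routine count and return a device plus an error. The sketch below shows how a test or an embedding application might inject its own factory under that assumed signature; the `Device` interface, `Config` type and `fakeTun` are placeholders invented for the example, not nebula's real types.

package main

import (
    "fmt"
    "net"

    "github.com/sirupsen/logrus"
)

// Device stands in for overlay.Device; only the bits the sketch needs.
type Device interface {
    Activate() error
    Cidr() *net.IPNet
}

// Config is a placeholder for nebula's config.C.
type Config struct{}

// DeviceFactory mirrors the signature implied by deviceFactory(c, l, tunCidr, routines).
type DeviceFactory func(c *Config, l *logrus.Logger, cidr *net.IPNet, routines int) (Device, error)

// fakeTun is a hypothetical in-memory device an embedder or test might inject.
type fakeTun struct{ cidr *net.IPNet }

func (t *fakeTun) Activate() error  { return nil }
func (t *fakeTun) Cidr() *net.IPNet { return t.cidr }

func main() {
    _, cidr, _ := net.ParseCIDR("192.168.100.1/24")

    var factory DeviceFactory = func(c *Config, l *logrus.Logger, cidr *net.IPNet, routines int) (Device, error) {
        l.WithField("routines", routines).Info("building fake device")
        return &fakeTun{cidr: cidr}, nil
    }

    dev, err := factory(&Config{}, logrus.New(), cidr, 1)
    if err != nil {
        panic(err)
    }
    fmt.Println(dev.Cidr())
}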
@@ -147,17 +145,41 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
     }

     // set up our UDP listener
-    udpConns := make([]*udp.Conn, routines)
+    udpConns := make([]udp.Conn, routines)
     port := c.GetInt("listen.port", 0)

     if !configTest {
+        rawListenHost := c.GetString("listen.host", "0.0.0.0")
+        var listenHost *net.IPAddr
+        if rawListenHost == "[::]" {
+            // Old guidance was to provide the literal `[::]` in `listen.host` but that won't resolve.
+            listenHost = &net.IPAddr{IP: net.IPv6zero}
+
+        } else {
+            listenHost, err = net.ResolveIPAddr("ip", rawListenHost)
+            if err != nil {
+                return nil, util.ContextualizeIfNeeded("Failed to resolve listen.host", err)
+            }
+        }
+
         for i := 0; i < routines; i++ {
-            udpServer, err := udp.NewListener(l, c.GetString("listen.host", "0.0.0.0"), port, routines > 1, c.GetInt("listen.batch", 64))
+            l.Infof("listening %q %d", listenHost.IP, port)
+            udpServer, err := udp.NewListener(l, listenHost.IP, port, routines > 1, c.GetInt("listen.batch", 64))
             if err != nil {
                 return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
             }
             udpServer.ReloadConfig(c)
             udpConns[i] = udpServer
+
+            // If port is dynamic, discover it before the next pass through the for loop
+            // This way all routines will use the same port correctly
+            if port == 0 {
+                uPort, err := udpServer.LocalAddr()
+                if err != nil {
+                    return nil, util.NewContextualError("Failed to get listening port", nil, err)
+                }
+                port = int(uPort.Port)
+            }
         }
     }

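Two behaviors added in this hunk are worth calling out: `listen.host` is now resolved once up front, with the literal `[::]` special-cased because `net.ResolveIPAddr` cannot parse a bracketed address, and when `listen.port` is 0 the OS-assigned port from the first listener is reused for every later routine. The standalone snippet below demonstrates both standard-library behaviors; it is independent of nebula's `udp` package.

package main

import (
    "fmt"
    "net"
)

func main() {
    // `[::]` is URL bracket syntax, not an address literal, so ResolveIPAddr
    // rejects it; that is why the diff maps it straight to net.IPv6zero.
    if _, err := net.ResolveIPAddr("ip", "[::]"); err != nil {
        fmt.Println("[::] does not resolve:", err)
    }
    fmt.Println("special-cased value:", net.IPv6zero) // ::

    // A hostname or plain IP resolves normally, as the new else branch expects.
    addr, err := net.ResolveIPAddr("ip", "127.0.0.1")
    if err != nil {
        panic(err)
    }

    // Port 0 asks the kernel for a free port; reading it back after the first
    // bind lets later listeners reuse the same number, mirroring the
    // `if port == 0 { ... port = int(uPort.Port) }` block above.
    first, err := net.ListenUDP("udp", &net.UDPAddr{IP: addr.IP, Port: 0})
    if err != nil {
        panic(err)
    }
    defer first.Close()
    port := first.LocalAddr().(*net.UDPAddr).Port
    fmt.Println("kernel picked port", port)
}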
@@ -169,7 +191,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
     for _, rawPreferredRange := range rawPreferredRanges {
         _, preferredRange, err := net.ParseCIDR(rawPreferredRange)
         if err != nil {
-            return nil, util.NewContextualError("Failed to parse preferred ranges", nil, err)
+            return nil, util.ContextualizeIfNeeded("Failed to parse preferred ranges", err)
         }
         preferredRanges = append(preferredRanges, preferredRange)
     }
@@ -182,7 +204,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
     if rawLocalRange != "" {
         _, localRange, err := net.ParseCIDR(rawLocalRange)
         if err != nil {
-            return nil, util.NewContextualError("Failed to parse local_range", nil, err)
+            return nil, util.ContextualizeIfNeeded("Failed to parse local_range", err)
         }

         // Check if the entry for local_range was already specified in
@@ -199,28 +221,18 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
         }
     }

-    hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
+    hostMap := NewHostMap(l, tunCidr, preferredRanges)
     hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)

-    l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created")
-
-    /*
-        config.SetDefault("promoter.interval", 10)
-        go hostMap.Promoter(config.GetInt("promoter.interval"))
-    */
+    l.
+        WithField("network", hostMap.vpnCIDR.String()).
+        WithField("preferredRanges", hostMap.preferredRanges).
+        Info("Main HostMap created")

     punchy := NewPunchyFromConfig(l, c)
-    if punchy.GetPunch() && !configTest {
-        l.Info("UDP hole punching enabled")
-        go hostMap.Punchy(ctx, udpConns[0])
-    }
-
-    lightHouse, err := NewLightHouseFromConfig(l, c, tunCidr, udpConns[0], punchy)
-    switch {
-    case errors.As(err, &util.ContextualError{}):
-        return nil, err
-    case err != nil:
-        return nil, util.NewContextualError("Failed to initialize lighthouse handler", nil, err)
+    lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
+    if err != nil {
+        return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
     }

     var messageMetrics *MessageMetrics
@@ -241,13 +253,9 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
         messageMetrics: messageMetrics,
     }

-    handshakeManager := NewHandshakeManager(l, tunCidr, preferredRanges, hostMap, lightHouse, udpConns[0], handshakeConfig)
+    handshakeManager := NewHandshakeManager(l, hostMap, lightHouse, udpConns[0], handshakeConfig)
     lightHouse.handshakeTrigger = handshakeManager.trigger

-    //TODO: These will be reused for psk
-    //handshakeMACKey := config.GetString("handshake_mac.key", "")
-    //handshakeAcceptedMACKeys := config.GetStringSlice("handshake_mac.accepted_keys", []string{})
-
     serveDns := false
     if c.GetBool("lighthouse.serve_dns", false) {
         if c.GetBool("lighthouse.am_lighthouse", false) {
@@ -259,26 +267,29 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

     checkInterval := c.GetInt("timers.connection_alive_interval", 5)
     pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)

     ifConfig := &InterfaceConfig{
         HostMap: hostMap,
         Inside: tun,
         Outside: udpConns[0],
-        certState: cs,
+        pki: pki,
         Cipher: c.GetString("cipher", "aes"),
         Firewall: fw,
         ServeDns: serveDns,
         HandshakeManager: handshakeManager,
         lightHouse: lightHouse,
-        checkInterval: checkInterval,
-        pendingDeletionInterval: pendingDeletionInterval,
+        checkInterval: time.Second * time.Duration(checkInterval),
+        pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
+        tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
+        reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
+        reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
         DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
         DropMulticast: c.GetBool("tun.drop_multicast", false),
         routines: routines,
         MessageMetrics: messageMetrics,
         version: buildVersion,
-        caPool: caPool,
-        disconnectInvalid: c.GetBool("pki.disconnect_invalid", false),
         relayManager: NewRelayManager(ctx, l, hostMap, c),
+        punchy: punchy,

         ConntrackCacheTimeout: conntrackCacheTimeout,
         l: l,
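`checkInterval` and `pendingDeletionInterval` are still read as whole seconds, but the InterfaceConfig fields now want a `time.Duration`, hence the `time.Second * time.Duration(n)` conversion; the new promote/requery knobs are read straight from config with package-level defaults. A minimal sketch of that conversion pattern follows; the default constant values are made up here and only stand in for defaultPromoteEvery, defaultReQueryEvery and defaultReQueryWait.

package main

import (
    "fmt"
    "time"
)

// Hypothetical defaults standing in for the constants referenced in the diff.
const (
    defaultPromoteEvery uint32        = 1000
    defaultReQueryEvery uint32        = 256
    defaultReQueryWait  time.Duration = time.Minute
)

func main() {
    // The config still stores whole seconds as ints...
    checkInterval := 5
    pendingDeletionInterval := 10

    // ...but the interface fields are time.Durations, hence the explicit
    // conversion used in the new InterfaceConfig literal above.
    check := time.Second * time.Duration(checkInterval)
    pending := time.Second * time.Duration(pendingDeletionInterval)

    fmt.Println(check, pending) // 5s 10s
    fmt.Println(defaultPromoteEvery, defaultReQueryEvery, defaultReQueryWait)
}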
@@ -303,21 +314,21 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
         // TODO: Better way to attach these, probably want a new interface in InterfaceConfig
         // I don't want to make this initial commit too far-reaching though
         ifce.writers = udpConns
+        lightHouse.ifce = ifce

         ifce.RegisterConfigChangeCallbacks(c)
+        ifce.reloadDisconnectInvalid(c)
         ifce.reloadSendRecvError(c)

-        go handshakeManager.Run(ctx, ifce)
-        go lightHouse.LhUpdateWorker(ctx, ifce)
+        handshakeManager.f = ifce
+        go handshakeManager.Run(ctx)
     }

     // TODO - stats third-party modules start uncancellable goroutines. Update those libs to accept
     // a context so that they can exit when the context is Done.
     statsStart, err := startStats(l, c, buildVersion, configTest)

     if err != nil {
-        return nil, util.NewContextualError("Failed to start stats emitter", nil, err)
+        return nil, util.ContextualizeIfNeeded("Failed to start stats emitter", err)
     }

     if configTest {
@@ -327,7 +338,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
     //TODO: check if we _should_ be emitting stats
     go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))

-    attachCommands(l, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
+    attachCommands(l, c, ssh, ifce)

     // Start DNS server last to allow using the nebula IP as lighthouse.dns.host
     var dnsStart func()
@@ -336,5 +347,14 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
         dnsStart = dnsMain(l, hostMap, c)
     }

-    return &Control{ifce, l, cancel, sshStart, statsStart, dnsStart}, nil
+    return &Control{
+        ifce,
+        l,
+        ctx,
+        cancel,
+        sshStart,
+        statsStart,
+        dnsStart,
+        lightHouse.StartUpdateWorker,
+    }, nil
 }
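The positional `Control` literal gains two values: the context and `lightHouse.StartUpdateWorker`, suggesting the struct now carries the run context next to its cancel func plus a lighthouse start hook. The sketch below shows a field layout consistent with that literal; only the order and count of fields come from the diff, the field names themselves are assumptions.

package main

import (
    "context"

    "github.com/sirupsen/logrus"
)

// Interface is a placeholder for nebula's *Interface.
type Interface struct{}

// Control, as implied by the new composite literal
// {ifce, l, ctx, cancel, sshStart, statsStart, dnsStart, lightHouse.StartUpdateWorker}.
type Control struct {
    f               *Interface
    l               *logrus.Logger
    ctx             context.Context
    cancel          context.CancelFunc
    sshStart        func()
    statsStart      func()
    dnsStart        func()
    lighthouseStart func()
}

func newControl(ifce *Interface, l *logrus.Logger) *Control {
    ctx, cancel := context.WithCancel(context.Background())
    // Optional start hooks stay nil when the corresponding subsystem is disabled.
    return &Control{ifce, l, ctx, cancel, nil, nil, nil, nil}
}

func main() {
    c := newControl(&Interface{}, logrus.New())
    defer c.cancel()
    c.l.Info("control constructed (sketch)")
}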
@@ -84,6 +84,7 @@ func newLighthouseMetrics() *MessageMetrics {
         NebulaMeta_HostQueryReply,
         NebulaMeta_HostUpdateNotification,
         NebulaMeta_HostPunchNotification,
+        NebulaMeta_HostUpdateNotificationAck,
     }
     for _, i := range used {
         h[i] = []metrics.Counter{metrics.GetOrRegisterCounter(fmt.Sprintf("lighthouse.%s.%s", t, i.String()), nil)}
Some files were not shown because too many files have changed in this diff.