mirror of
https://github.com/slackhq/nebula.git
synced 2025-11-22 08:24:25 +01:00
Compare commits
78 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
57eb80e9fb | ||
|
|
96f4dcaab8 | ||
|
|
6d8c5f437c | ||
|
|
165b671e70 | ||
|
|
6be0bad68a | ||
|
|
7ae3cd25f8 | ||
|
|
9a7ed57a3f | ||
|
|
eb9f22a8fa | ||
|
|
54a8499c7b | ||
|
|
419aaf2e36 | ||
|
|
1701087035 | ||
|
|
a9cb2e06f4 | ||
|
|
115b4b70b1 | ||
|
|
0707caedb4 | ||
|
|
bd9cc01d62 | ||
|
|
d1f786419c | ||
|
|
31ed9269d7 | ||
|
|
48eb63899f | ||
|
|
b26c13336f | ||
|
|
e0185c4b01 | ||
|
|
702e1c59bd | ||
|
|
5fe8f45d05 | ||
|
|
03e4a7f988 | ||
|
|
0b67b19771 | ||
|
|
a0d3b93ae5 | ||
|
|
58ec1f7a7b | ||
|
|
397fe5f879 | ||
|
|
9b03053191 | ||
|
|
3cb4e0ef57 | ||
|
|
e0553822b0 | ||
|
|
d3fe3efcb0 | ||
|
|
fd99ce9a71 | ||
|
|
6685856b5d | ||
|
|
a56a97e5c3 | ||
|
|
ee8e1348e9 | ||
|
|
1a6c657451 | ||
|
|
6b3d42efa5 | ||
|
|
2801fb2286 | ||
|
|
e28336c5db | ||
|
|
3e5c7e6860 | ||
|
|
8a82e0fb16 | ||
|
|
f0ef80500d | ||
|
|
61b784d2bb | ||
|
|
5da79e2a4c | ||
|
|
e1af37e46d | ||
|
|
6e0ae4f9a3 | ||
|
|
f0ac61c1f0 | ||
|
|
92cc32f844 | ||
|
|
2ea360e5e2 | ||
|
|
469ae78748 | ||
|
|
a06977bbd5 | ||
|
|
5bd8712946 | ||
|
|
0fc4d8192f | ||
|
|
5278b6f926 | ||
|
|
c177126ed0 | ||
|
|
c44da3abee | ||
|
|
b7e73da943 | ||
|
|
ff54bfd9f3 | ||
|
|
b5a85a6eb8 | ||
|
|
3ae242fa5f | ||
|
|
cb2ec861ea | ||
|
|
a3e6edf9c7 | ||
|
|
ad7222509d | ||
|
|
12dbbd3dd3 | ||
|
|
ec48298fe8 | ||
|
|
77769de1e6 | ||
|
|
022ae83a4a | ||
|
|
d4f9500ca5 | ||
|
|
9a8892c526 | ||
|
|
813b64ffb1 | ||
|
|
85f5849d0b | ||
|
|
9af242dc47 | ||
|
|
a800a48857 | ||
|
|
4c0ae3df5e | ||
|
|
feb3e1317f | ||
|
|
c2259f14a7 | ||
|
|
b1eeb5f3b8 | ||
|
|
2adf0ca1d1 |
57
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
57
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
name: "\U0001F41B Bug Report"
|
||||||
|
description: Report an issue or possible bug
|
||||||
|
title: "\U0001F41B BUG:"
|
||||||
|
labels: []
|
||||||
|
assignees: []
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
### Thank you for taking the time to file a bug report!
|
||||||
|
|
||||||
|
Please fill out this form as completely as possible.
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: version
|
||||||
|
attributes:
|
||||||
|
label: What version of `nebula` are you using?
|
||||||
|
placeholder: 0.0.0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: os
|
||||||
|
attributes:
|
||||||
|
label: What operating system are you using?
|
||||||
|
description: iOS and Android specific issues belong in the [mobile_nebula](https://github.com/DefinedNet/mobile_nebula) repo.
|
||||||
|
placeholder: Linux, Mac, Windows
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Describe the Bug
|
||||||
|
description: A clear and concise description of what the bug is.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: logs
|
||||||
|
attributes:
|
||||||
|
label: Logs from affected hosts
|
||||||
|
description: |
|
||||||
|
Provide logs from all affected hosts during the time of the issue.
|
||||||
|
Improve formatting by using <code>```</code> at the beginning and end of each log block.
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: configs
|
||||||
|
attributes:
|
||||||
|
label: Config files from affected hosts
|
||||||
|
description: |
|
||||||
|
Provide config files for all affected hosts.
|
||||||
|
Improve formatting by using <code>```</code> at the beginning and end of each config file.
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
13
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
13
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
blank_issues_enabled: true
|
||||||
|
contact_links:
|
||||||
|
- name: 📘 Documentation
|
||||||
|
url: https://nebula.defined.net/docs/
|
||||||
|
about: Review documentation.
|
||||||
|
|
||||||
|
- name: 💁 Support/Chat
|
||||||
|
url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
|
||||||
|
about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'
|
||||||
|
|
||||||
|
- name: 📱 Mobile Nebula
|
||||||
|
url: https://github.com/definednet/mobile_nebula
|
||||||
|
about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'
|
||||||
24
.github/workflows/gofmt.yml
vendored
24
.github/workflows/gofmt.yml
vendored
@@ -14,31 +14,21 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
id: go
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- uses: actions/cache@v2
|
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
go-version-file: 'go.mod'
|
||||||
key: ${{ runner.os }}-gofmt1.18-${{ hashFiles('**/go.sum') }}
|
check-latest: true
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-gofmt1.18-
|
|
||||||
|
|
||||||
- name: Install goimports
|
- name: Install goimports
|
||||||
run: |
|
run: |
|
||||||
go get golang.org/x/tools/cmd/goimports
|
go install golang.org/x/tools/cmd/goimports@latest
|
||||||
go build golang.org/x/tools/cmd/goimports
|
|
||||||
|
|
||||||
- name: gofmt
|
- name: gofmt
|
||||||
run: |
|
run: |
|
||||||
if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -l)" ]
|
if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -l)" ]
|
||||||
then
|
then
|
||||||
find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -d
|
find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -d
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|||||||
33
.github/workflows/release.yml
vendored
33
.github/workflows/release.yml
vendored
@@ -10,13 +10,12 @@ jobs:
|
|||||||
name: Build Linux All
|
name: Build Linux All
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
|
|
||||||
- name: Checkout code
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
with:
|
||||||
|
go-version-file: 'go.mod'
|
||||||
|
check-latest: true
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
@@ -34,13 +33,12 @@ jobs:
|
|||||||
name: Build Windows
|
name: Build Windows
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
|
|
||||||
- name: Checkout code
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
with:
|
||||||
|
go-version-file: 'go.mod'
|
||||||
|
check-latest: true
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
@@ -68,13 +66,12 @@ jobs:
|
|||||||
HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
|
HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
|
||||||
runs-on: macos-11
|
runs-on: macos-11
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
|
|
||||||
- name: Checkout code
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
with:
|
||||||
|
go-version-file: 'go.mod'
|
||||||
|
check-latest: true
|
||||||
|
|
||||||
- name: Import certificates
|
- name: Import certificates
|
||||||
if: env.HAS_SIGNING_CREDS == 'true'
|
if: env.HAS_SIGNING_CREDS == 'true'
|
||||||
|
|||||||
25
.github/workflows/smoke.yml
vendored
25
.github/workflows/smoke.yml
vendored
@@ -18,21 +18,12 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
id: go
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- uses: actions/cache@v2
|
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
go-version-file: 'go.mod'
|
||||||
key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
|
check-latest: true
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go1.18-
|
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
run: make bin-docker
|
run: make bin-docker
|
||||||
@@ -53,4 +44,12 @@ jobs:
|
|||||||
working-directory: ./.github/workflows/smoke
|
working-directory: ./.github/workflows/smoke
|
||||||
run: ./smoke-relay.sh
|
run: ./smoke-relay.sh
|
||||||
|
|
||||||
|
- name: setup docker image for P256
|
||||||
|
working-directory: ./.github/workflows/smoke
|
||||||
|
run: NAME="smoke-p256" CURVE=P256 ./build.sh
|
||||||
|
|
||||||
|
- name: run smoke-p256
|
||||||
|
working-directory: ./.github/workflows/smoke
|
||||||
|
run: NAME="smoke-p256" ./smoke.sh
|
||||||
|
|
||||||
timeout-minutes: 10
|
timeout-minutes: 10
|
||||||
|
|||||||
4
.github/workflows/smoke/Dockerfile
vendored
4
.github/workflows/smoke/Dockerfile
vendored
@@ -1,4 +1,6 @@
|
|||||||
FROM debian:buster
|
FROM ubuntu:jammy
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y iputils-ping ncat tcpdump
|
||||||
|
|
||||||
ADD ./build /nebula
|
ADD ./build /nebula
|
||||||
|
|
||||||
|
|||||||
4
.github/workflows/smoke/build.sh
vendored
4
.github/workflows/smoke/build.sh
vendored
@@ -29,11 +29,11 @@ mkdir ./build
|
|||||||
OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
|
OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
|
||||||
../genconfig.sh >host4.yml
|
../genconfig.sh >host4.yml
|
||||||
|
|
||||||
../../../../nebula-cert ca -name "Smoke Test"
|
../../../../nebula-cert ca -curve "${CURVE:-25519}" -name "Smoke Test"
|
||||||
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
|
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
|
||||||
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
|
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
|
||||||
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
|
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
|
||||||
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
|
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
|
||||||
)
|
)
|
||||||
|
|
||||||
sudo docker build -t nebula:smoke .
|
sudo docker build -t "nebula:${NAME:-smoke}" .
|
||||||
|
|||||||
2
.github/workflows/smoke/genconfig.sh
vendored
2
.github/workflows/smoke/genconfig.sh
vendored
@@ -50,6 +50,8 @@ tun:
|
|||||||
dev: ${TUN_DEV:-nebula1}
|
dev: ${TUN_DEV:-nebula1}
|
||||||
|
|
||||||
firewall:
|
firewall:
|
||||||
|
inbound_action: reject
|
||||||
|
outbound_action: reject
|
||||||
outbound: ${OUTBOUND:-$FIREWALL_ALL}
|
outbound: ${OUTBOUND:-$FIREWALL_ALL}
|
||||||
inbound: ${INBOUND:-$FIREWALL_ALL}
|
inbound: ${INBOUND:-$FIREWALL_ALL}
|
||||||
|
|
||||||
|
|||||||
61
.github/workflows/smoke/smoke.sh
vendored
61
.github/workflows/smoke/smoke.sh
vendored
@@ -20,20 +20,37 @@ cleanup() {
|
|||||||
|
|
||||||
trap cleanup EXIT
|
trap cleanup EXIT
|
||||||
|
|
||||||
sudo docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
|
CONTAINER="nebula:${NAME:-smoke}"
|
||||||
sudo docker run --name host2 --rm nebula:smoke -config host2.yml -test
|
|
||||||
sudo docker run --name host3 --rm nebula:smoke -config host3.yml -test
|
|
||||||
sudo docker run --name host4 --rm nebula:smoke -config host4.yml -test
|
|
||||||
|
|
||||||
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
|
||||||
|
sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
|
||||||
|
sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
|
||||||
|
sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
|
||||||
|
|
||||||
|
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
||||||
sleep 1
|
sleep 1
|
||||||
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
||||||
sleep 1
|
sleep 1
|
||||||
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
||||||
sleep 1
|
sleep 1
|
||||||
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
|
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
|
||||||
sleep 1
|
sleep 1
|
||||||
|
|
||||||
|
# grab tcpdump pcaps for debugging
|
||||||
|
sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
|
||||||
|
sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
|
||||||
|
sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
|
||||||
|
sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
|
||||||
|
sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
|
||||||
|
sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
|
||||||
|
sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
|
||||||
|
sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
|
||||||
|
|
||||||
|
sudo docker exec host2 ncat -nklv 0.0.0.0 2000 &
|
||||||
|
sudo docker exec host3 ncat -nklv 0.0.0.0 2000 &
|
||||||
|
sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
|
||||||
|
sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
echo
|
echo
|
||||||
echo " *** Testing ping from lighthouse1"
|
echo " *** Testing ping from lighthouse1"
|
||||||
@@ -51,6 +68,15 @@ sudo docker exec host2 ping -c1 192.168.100.1
|
|||||||
# Should fail because not allowed by host3 inbound firewall
|
# Should fail because not allowed by host3 inbound firewall
|
||||||
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||||
|
|
||||||
|
set +x
|
||||||
|
echo
|
||||||
|
echo " *** Testing ncat from host2"
|
||||||
|
echo
|
||||||
|
set -x
|
||||||
|
# Should fail because not allowed by host3 inbound firewall
|
||||||
|
! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||||
|
! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
echo
|
echo
|
||||||
echo " *** Testing ping from host3"
|
echo " *** Testing ping from host3"
|
||||||
@@ -59,6 +85,14 @@ set -x
|
|||||||
sudo docker exec host3 ping -c1 192.168.100.1
|
sudo docker exec host3 ping -c1 192.168.100.1
|
||||||
sudo docker exec host3 ping -c1 192.168.100.2
|
sudo docker exec host3 ping -c1 192.168.100.2
|
||||||
|
|
||||||
|
set +x
|
||||||
|
echo
|
||||||
|
echo " *** Testing ncat from host3"
|
||||||
|
echo
|
||||||
|
set -x
|
||||||
|
sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000
|
||||||
|
sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
echo
|
echo
|
||||||
echo " *** Testing ping from host4"
|
echo " *** Testing ping from host4"
|
||||||
@@ -69,6 +103,17 @@ sudo docker exec host4 ping -c1 192.168.100.1
|
|||||||
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
||||||
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
|
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
|
||||||
|
|
||||||
|
set +x
|
||||||
|
echo
|
||||||
|
echo " *** Testing ncat from host4"
|
||||||
|
echo
|
||||||
|
set -x
|
||||||
|
# Should fail because not allowed by host4 outbound firewall
|
||||||
|
! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
|
||||||
|
! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||||
|
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
|
||||||
|
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
echo
|
echo
|
||||||
echo " *** Testing conntrack"
|
echo " *** Testing conntrack"
|
||||||
|
|||||||
63
.github/workflows/test.yml
vendored
63
.github/workflows/test.yml
vendored
@@ -18,25 +18,19 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
id: go
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- uses: actions/cache@v2
|
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
go-version-file: 'go.mod'
|
||||||
key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
|
check-latest: true
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go1.18-
|
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: make all
|
run: make all
|
||||||
|
|
||||||
|
- name: Vet
|
||||||
|
run: make vet
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: make test
|
run: make test
|
||||||
|
|
||||||
@@ -49,6 +43,27 @@ jobs:
|
|||||||
path: e2e/mermaid/
|
path: e2e/mermaid/
|
||||||
if-no-files-found: warn
|
if-no-files-found: warn
|
||||||
|
|
||||||
|
test-linux-boringcrypto:
|
||||||
|
name: Build and test on linux with boringcrypto
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version-file: 'go.mod'
|
||||||
|
check-latest: true
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
run: make bin-boringcrypto
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
run: make test-boringcrypto
|
||||||
|
|
||||||
|
- name: End 2 end
|
||||||
|
run: make e2evv GOEXPERIMENT=boringcrypto CGO_ENABLED=1
|
||||||
|
|
||||||
test:
|
test:
|
||||||
name: Build and test on ${{ matrix.os }}
|
name: Build and test on ${{ matrix.os }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
@@ -57,21 +72,12 @@ jobs:
|
|||||||
os: [windows-latest, macos-11]
|
os: [windows-latest, macos-11]
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.18
|
- uses: actions/checkout@v3
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.18
|
|
||||||
id: go
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- uses: actions/setup-go@v4
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- uses: actions/cache@v2
|
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
go-version-file: 'go.mod'
|
||||||
key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
|
check-latest: true
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go1.18-
|
|
||||||
|
|
||||||
- name: Build nebula
|
- name: Build nebula
|
||||||
run: go build ./cmd/nebula
|
run: go build ./cmd/nebula
|
||||||
@@ -79,8 +85,11 @@ jobs:
|
|||||||
- name: Build nebula-cert
|
- name: Build nebula-cert
|
||||||
run: go build ./cmd/nebula-cert
|
run: go build ./cmd/nebula-cert
|
||||||
|
|
||||||
|
- name: Vet
|
||||||
|
run: make vet
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: go test -v ./...
|
run: make test
|
||||||
|
|
||||||
- name: End 2 end
|
- name: End 2 end
|
||||||
run: make e2evv
|
run: make e2evv
|
||||||
|
|||||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -4,10 +4,14 @@
|
|||||||
/nebula-arm6
|
/nebula-arm6
|
||||||
/nebula-darwin
|
/nebula-darwin
|
||||||
/nebula.exe
|
/nebula.exe
|
||||||
/cert/*.crt
|
/nebula-cert.exe
|
||||||
/cert/*.key
|
|
||||||
/coverage.out
|
/coverage.out
|
||||||
/cpu.pprof
|
/cpu.pprof
|
||||||
/build
|
/build
|
||||||
/*.tar.gz
|
/*.tar.gz
|
||||||
/e2e/mermaid/
|
/e2e/mermaid/
|
||||||
|
**.crt
|
||||||
|
**.key
|
||||||
|
**.pem
|
||||||
|
!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key
|
||||||
|
!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt
|
||||||
|
|||||||
109
CHANGELOG.md
109
CHANGELOG.md
@@ -7,6 +7,109 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
|
## [1.7.2] - 2023-06-01
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fix a freeze during config reload if the `static_host_map` config was changed. (#886)
|
||||||
|
|
||||||
|
## [1.7.1] - 2023-05-18
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fix IPv4 addresses returned by `static_host_map` DNS lookup queries being
|
||||||
|
treated as IPv6 addresses. (#877)
|
||||||
|
|
||||||
|
## [1.7.0] - 2023-05-17
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- `nebula-cert ca` now supports encrypting the CA's private key with a
|
||||||
|
passphrase. Pass `-encrypt` in order to be prompted for a passphrase.
|
||||||
|
Encryption is performed using AES-256-GCM and Argon2id for KDF. KDF
|
||||||
|
parameters default to RFC recommendations, but can be overridden via CLI
|
||||||
|
flags `-argon-memory`, `-argon-parallelism`, and `-argon-iterations`. (#386)
|
||||||
|
|
||||||
|
- Support for curve P256 and BoringCrypto has been added. See README section
|
||||||
|
"Curve P256 and BoringCrypto" for more details. (#865, #861, #769, #856, #803)
|
||||||
|
|
||||||
|
- New firewall rule `local_cidr`. This could be used to filter destinations
|
||||||
|
when using `unsafe_routes`. (#507)
|
||||||
|
|
||||||
|
- Add `unsafe_route` option `install`. This controls whether the route is
|
||||||
|
installed in the systems routing table. (#831)
|
||||||
|
|
||||||
|
- Add `tun.use_system_route_table` option. Set to true to manage unsafe routes
|
||||||
|
directly on the system route table with gateway routes instead of in Nebula
|
||||||
|
configuration files. This is only supported on Linux. (#839)
|
||||||
|
|
||||||
|
- The metric `certificate.ttl_seconds` is now exposed via stats. (#782)
|
||||||
|
|
||||||
|
- Add `punchy.respond_delay` option. This allows you to change the delay
|
||||||
|
before attempting punchy.respond. Default is 5 seconds. (#721)
|
||||||
|
|
||||||
|
- Added SSH commands to allow the capture of a mutex profile. (#737)
|
||||||
|
|
||||||
|
- You can now set `lighthouse.calculated_remotes` to make it possible to do
|
||||||
|
handshakes without a lighthouse in certain configurations. (#759)
|
||||||
|
|
||||||
|
- The firewall can be configured to send REJECT replies instead of the default
|
||||||
|
DROP behavior. (#738)
|
||||||
|
|
||||||
|
- For macOS, an example launchd configuration file is now provided. (#762)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Lighthouses and other `static_host_map` entries that use DNS names will now
|
||||||
|
be automatically refreshed to detect when the IP address changes. (#796)
|
||||||
|
|
||||||
|
- Lighthouses send ACK replies back to clients so that they do not fall into
|
||||||
|
connection testing as often by clients. (#851, #408)
|
||||||
|
|
||||||
|
- Allow the `listen.host` option to contain a hostname. (#825)
|
||||||
|
|
||||||
|
- When Nebula switches to a new certificate (such as via SIGHUP), we now
|
||||||
|
rehandshake with all existing tunnels. This allows firewall groups to be
|
||||||
|
updated and `pki.disconnect_invalid` to know about the new certificate
|
||||||
|
expiration time. (#838, #857, #842, #840, #835, #828, #820, #807)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Always disconnect blocklisted hosts, even if `pki.disconnect_invalid` is
|
||||||
|
not set. (#858)
|
||||||
|
|
||||||
|
- Dependencies updated and go1.20 required. (#780, #824, #855, #854)
|
||||||
|
|
||||||
|
- Fix possible race condition with relays. (#827)
|
||||||
|
|
||||||
|
- FreeBSD: Fix connection to the localhost's own Nebula IP. (#808)
|
||||||
|
|
||||||
|
- Normalize and document some common log field values. (#837, #811)
|
||||||
|
|
||||||
|
- Fix crash if you set unlucky values for the firewall timeout configuration
|
||||||
|
options. (#802)
|
||||||
|
|
||||||
|
- Make DNS queries case insensitive. (#793)
|
||||||
|
|
||||||
|
- Update example systemd configurations to want `nss-lookup`. (#791)
|
||||||
|
|
||||||
|
- Errors with SSH commands now go to the SSH tunnel instead of stderr. (#757)
|
||||||
|
|
||||||
|
- Fix a hang when shutting down Android. (#772)
|
||||||
|
|
||||||
|
## [1.6.1] - 2022-09-26
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Refuse to process underlay packets received from overlay IPs. This prevents
|
||||||
|
confusion on hosts that have unsafe routes configured. (#741)
|
||||||
|
|
||||||
|
- The ssh `reload` command did not work on Windows, since it relied on sending
|
||||||
|
a SIGHUP signal internally. This has been fixed. (#725)
|
||||||
|
|
||||||
|
- A regression in v1.5.2 that broke unsafe routes on Mobile clients has been
|
||||||
|
fixed. (#729)
|
||||||
|
|
||||||
## [1.6.0] - 2022-06-30
|
## [1.6.0] - 2022-06-30
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
@@ -385,7 +488,11 @@ created.)
|
|||||||
|
|
||||||
- Initial public release.
|
- Initial public release.
|
||||||
|
|
||||||
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.6.0...HEAD
|
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.2...HEAD
|
||||||
|
[1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
|
||||||
|
[1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
|
||||||
|
[1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
|
||||||
|
[1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
|
||||||
[1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0
|
[1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0
|
||||||
[1.5.2]: https://github.com/slackhq/nebula/releases/tag/v1.5.2
|
[1.5.2]: https://github.com/slackhq/nebula/releases/tag/v1.5.2
|
||||||
[1.5.0]: https://github.com/slackhq/nebula/releases/tag/v1.5.0
|
[1.5.0]: https://github.com/slackhq/nebula/releases/tag/v1.5.0
|
||||||
|
|||||||
38
LOGGING.md
Normal file
38
LOGGING.md
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
### Logging conventions
|
||||||
|
|
||||||
|
A log message (the string/format passed to `Info`, `Error`, `Debug` etc, as well as their `Sprintf` counterparts) should
|
||||||
|
be a descriptive message about the event and may contain specific identifying characteristics. Regardless of the
|
||||||
|
level of detail in the message identifying characteristics should always be included via `WithField`, `WithFields` or
|
||||||
|
`WithError`
|
||||||
|
|
||||||
|
If an error is being logged use `l.WithError(err)` so that there is better discoverability about the event as well
|
||||||
|
as the specific error condition.
|
||||||
|
|
||||||
|
#### Common fields
|
||||||
|
|
||||||
|
- `cert` - a `cert.NebulaCertificate` object, do not `.String()` this manually, `logrus` will marshal objects properly
|
||||||
|
for the formatter it is using.
|
||||||
|
- `fingerprint` - a single `NebeulaCertificate` hex encoded fingerprint
|
||||||
|
- `fingerprints` - an array of `NebulaCertificate` hex encoded fingerprints
|
||||||
|
- `fwPacket` - a FirewallPacket object
|
||||||
|
- `handshake` - an object containing:
|
||||||
|
- `stage` - the current stage counter
|
||||||
|
- `style` - noise handshake style `ix_psk0`, `xx`, etc
|
||||||
|
- `header` - a nebula header object
|
||||||
|
- `udpAddr` - a `net.UDPAddr` object
|
||||||
|
- `udpIp` - a udp ip address
|
||||||
|
- `vpnIp` - vpn ip of the host (remote or local)
|
||||||
|
- `relay` - the vpnIp of the relay host that is or should be handling the relay packet
|
||||||
|
- `relayFrom` - The vpnIp of the initial sender of the relayed packet
|
||||||
|
- `relayTo` - The vpnIp of the final destination of a relayed packet
|
||||||
|
|
||||||
|
#### Example:
|
||||||
|
|
||||||
|
```
|
||||||
|
l.WithError(err).
|
||||||
|
WithField("vpnIp", IntIp(hostinfo.hostId)).
|
||||||
|
WithField("udpAddr", addr).
|
||||||
|
WithField("handshake", m{"stage": 1, "style": "ix"}).
|
||||||
|
WithField("cert", remoteCert).
|
||||||
|
Info("Invalid certificate from host")
|
||||||
|
```
|
||||||
19
Makefile
19
Makefile
@@ -1,4 +1,4 @@
|
|||||||
GOMINVERSION = 1.18
|
GOMINVERSION = 1.20
|
||||||
NEBULA_CMD_PATH = "./cmd/nebula"
|
NEBULA_CMD_PATH = "./cmd/nebula"
|
||||||
GO111MODULE = on
|
GO111MODULE = on
|
||||||
export GO111MODULE
|
export GO111MODULE
|
||||||
@@ -66,6 +66,9 @@ e2evvv: e2ev
|
|||||||
e2evvvv: TEST_ENV += TEST_LOGS=3
|
e2evvvv: TEST_ENV += TEST_LOGS=3
|
||||||
e2evvvv: e2ev
|
e2evvvv: e2ev
|
||||||
|
|
||||||
|
e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
|
||||||
|
e2e-bench: e2e
|
||||||
|
|
||||||
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
|
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
|
||||||
|
|
||||||
release: $(ALL:%=build/nebula-%.tar.gz)
|
release: $(ALL:%=build/nebula-%.tar.gz)
|
||||||
@@ -74,6 +77,8 @@ release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
|
|||||||
|
|
||||||
release-freebsd: build/nebula-freebsd-amd64.tar.gz
|
release-freebsd: build/nebula-freebsd-amd64.tar.gz
|
||||||
|
|
||||||
|
release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
|
||||||
|
|
||||||
BUILD_ARGS = -trimpath
|
BUILD_ARGS = -trimpath
|
||||||
|
|
||||||
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
|
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
|
||||||
@@ -88,6 +93,9 @@ bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
|
|||||||
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
|
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
|
||||||
mv $? .
|
mv $? .
|
||||||
|
|
||||||
|
bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
|
||||||
|
mv $? .
|
||||||
|
|
||||||
bin:
|
bin:
|
||||||
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
|
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
|
||||||
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
|
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
|
||||||
@@ -102,6 +110,10 @@ build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
|
|||||||
# Build an extra small binary for mips-softfloat
|
# Build an extra small binary for mips-softfloat
|
||||||
build/linux-mips-softfloat/%: LDFLAGS += -s -w
|
build/linux-mips-softfloat/%: LDFLAGS += -s -w
|
||||||
|
|
||||||
|
# boringcrypto
|
||||||
|
build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
|
||||||
|
build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
|
||||||
|
|
||||||
build/%/nebula: .FORCE
|
build/%/nebula: .FORCE
|
||||||
GOOS=$(firstword $(subst -, , $*)) \
|
GOOS=$(firstword $(subst -, , $*)) \
|
||||||
GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
|
GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
|
||||||
@@ -130,6 +142,9 @@ vet:
|
|||||||
test:
|
test:
|
||||||
go test -v ./...
|
go test -v ./...
|
||||||
|
|
||||||
|
test-boringcrypto:
|
||||||
|
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./...
|
||||||
|
|
||||||
test-cov-html:
|
test-cov-html:
|
||||||
go test -coverprofile=coverage.out
|
go test -coverprofile=coverage.out
|
||||||
go tool cover -html=coverage.out
|
go tool cover -html=coverage.out
|
||||||
@@ -167,6 +182,8 @@ bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
|
|||||||
smoke-docker: bin-docker
|
smoke-docker: bin-docker
|
||||||
cd .github/workflows/smoke/ && ./build.sh
|
cd .github/workflows/smoke/ && ./build.sh
|
||||||
cd .github/workflows/smoke/ && ./smoke.sh
|
cd .github/workflows/smoke/ && ./smoke.sh
|
||||||
|
cd .github/workflows/smoke/ && NAME="smoke-p256" CURVE="P256" ./build.sh
|
||||||
|
cd .github/workflows/smoke/ && NAME="smoke-p256" ./smoke.sh
|
||||||
|
|
||||||
smoke-relay-docker: bin-docker
|
smoke-relay-docker: bin-docker
|
||||||
cd .github/workflows/smoke/ && ./build-relay.sh
|
cd .github/workflows/smoke/ && ./build-relay.sh
|
||||||
|
|||||||
25
README.md
25
README.md
@@ -8,7 +8,7 @@ and tunneling, and each of those individual pieces existed before Nebula in vari
|
|||||||
What makes Nebula different to existing offerings is that it brings all of these ideas together,
|
What makes Nebula different to existing offerings is that it brings all of these ideas together,
|
||||||
resulting in a sum that is greater than its individual parts.
|
resulting in a sum that is greater than its individual parts.
|
||||||
|
|
||||||
Further documentation can be found [here](https://www.defined.net/nebula/).
|
Further documentation can be found [here](https://nebula.defined.net/docs/).
|
||||||
|
|
||||||
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
||||||
|
|
||||||
@@ -31,12 +31,16 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
|
|||||||
```
|
```
|
||||||
$ sudo pacman -S nebula
|
$ sudo pacman -S nebula
|
||||||
```
|
```
|
||||||
- [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/)
|
- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
|
||||||
```
|
```
|
||||||
$ sudo dnf copr enable jdoss/nebula
|
|
||||||
$ sudo dnf install nebula
|
$ sudo dnf install nebula
|
||||||
```
|
```
|
||||||
|
|
||||||
|
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
|
||||||
|
```
|
||||||
|
$ brew install nebula
|
||||||
|
```
|
||||||
|
|
||||||
#### Mobile
|
#### Mobile
|
||||||
|
|
||||||
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
|
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
|
||||||
@@ -93,13 +97,13 @@ Download a copy of the nebula [example configuration](https://github.com/slackhq
|
|||||||
|
|
||||||
#### 6. Copy nebula credentials, configuration, and binaries to each host
|
#### 6. Copy nebula credentials, configuration, and binaries to each host
|
||||||
|
|
||||||
For each host, copy the nebula binary to the host, along with `config.yaml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.
|
For each host, copy the nebula binary to the host, along with `config.yml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.
|
||||||
|
|
||||||
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
|
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
|
||||||
|
|
||||||
#### 7. Run nebula on each host
|
#### 7. Run nebula on each host
|
||||||
```
|
```
|
||||||
./nebula -config /path/to/config.yaml
|
./nebula -config /path/to/config.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
## Building Nebula from source
|
## Building Nebula from source
|
||||||
@@ -114,6 +118,17 @@ To build nebula for a specific platform (ex, Windows):
|
|||||||
|
|
||||||
See the [Makefile](Makefile) for more details on build targets
|
See the [Makefile](Makefile) for more details on build targets
|
||||||
|
|
||||||
|
## Curve P256 and BoringCrypto
|
||||||
|
|
||||||
|
The default curve used for cryptographic handshakes and signatures is Curve25519. This is the recommended setting for most users. If your deployment has certain compliance requirements, you have the option of creating your CA using `nebula-cert ca -curve P256` to use NIST Curve P256. The CA will then sign certificates using ECDSA P256, and any hosts using these certificates will use P256 for ECDH handshakes.
|
||||||
|
|
||||||
|
In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:
|
||||||
|
|
||||||
|
make bin-boringcrypto
|
||||||
|
make release-boringcrypto
|
||||||
|
|
||||||
|
This is not the recommended default deployment, but may be useful based on your compliance requirements.
|
||||||
|
|
||||||
## Credits
|
## Credits
|
||||||
|
|
||||||
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.
|
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.
|
||||||
|
|||||||
12
SECURITY.md
Normal file
12
SECURITY.md
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
Security Policy
|
||||||
|
===============
|
||||||
|
|
||||||
|
Reporting a Vulnerability
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
If you believe you have found a security vulnerability with Nebula, please let
|
||||||
|
us know right away. We will investigate all reports and do our best to quickly
|
||||||
|
fix valid issues.
|
||||||
|
|
||||||
|
You can submit your report on [HackerOne](https://hackerone.com/slack) and our
|
||||||
|
security team will respond as soon as possible.
|
||||||
8
boring.go
Normal file
8
boring.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
//go:build boringcrypto
|
||||||
|
// +build boringcrypto
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
import "crypto/boring"
|
||||||
|
|
||||||
|
var boringEnabled = boring.Enabled
|
||||||
143
calculated_remote.go
Normal file
143
calculated_remote.go
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This allows us to "guess" what the remote might be for a host while we wait
|
||||||
|
// for the lighthouse response. See "lighthouse.calculated_remotes" in the
|
||||||
|
// example config file.
|
||||||
|
type calculatedRemote struct {
|
||||||
|
ipNet net.IPNet
|
||||||
|
maskIP iputil.VpnIp
|
||||||
|
mask iputil.VpnIp
|
||||||
|
port uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) {
|
||||||
|
// Ensure this is an IPv4 mask that we expect
|
||||||
|
ones, bits := ipNet.Mask.Size()
|
||||||
|
if ones == 0 || bits != 32 {
|
||||||
|
return nil, fmt.Errorf("invalid mask: %v", ipNet)
|
||||||
|
}
|
||||||
|
if port < 0 || port > math.MaxUint16 {
|
||||||
|
return nil, fmt.Errorf("invalid port: %d", port)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &calculatedRemote{
|
||||||
|
ipNet: *ipNet,
|
||||||
|
maskIP: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
|
mask: iputil.Ip2VpnIp(ipNet.Mask),
|
||||||
|
port: uint32(port),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *calculatedRemote) String() string {
|
||||||
|
return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
|
||||||
|
// Combine the masked bytes of the "mask" IP with the unmasked bytes
|
||||||
|
// of the overlay IP
|
||||||
|
masked := (c.maskIP & c.mask) | (ip & ^c.mask)
|
||||||
|
|
||||||
|
return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) {
|
||||||
|
value := c.Get(k)
|
||||||
|
if value == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
calculatedRemotes := cidr.NewTree4()
|
||||||
|
|
||||||
|
rawMap, ok := value.(map[any]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
|
||||||
|
}
|
||||||
|
for rawKey, rawValue := range rawMap {
|
||||||
|
rawCIDR, ok := rawKey.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry, err := newCalculatedRemotesListFromConfig(rawValue)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
calculatedRemotes.AddCIDR(ipNet, entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
return calculatedRemotes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalculatedRemotesListFromConfig(raw any) ([]*calculatedRemote, error) {
|
||||||
|
rawList, ok := raw.([]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("calculated_remotes entry has invalid type: %T", raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
var l []*calculatedRemote
|
||||||
|
for _, e := range rawList {
|
||||||
|
c, err := newCalculatedRemotesEntryFromConfig(e)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("calculated_remotes entry: %w", err)
|
||||||
|
}
|
||||||
|
l = append(l, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
|
||||||
|
rawMap, ok := raw.(map[any]any)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid type: %T", raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawValue := rawMap["mask"]
|
||||||
|
if rawValue == nil {
|
||||||
|
return nil, fmt.Errorf("missing mask: %v", rawMap)
|
||||||
|
}
|
||||||
|
rawMask, ok := rawValue.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
|
||||||
|
}
|
||||||
|
_, ipNet, err := net.ParseCIDR(rawMask)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid mask: %s", rawMask)
|
||||||
|
}
|
||||||
|
|
||||||
|
var port int
|
||||||
|
rawValue = rawMap["port"]
|
||||||
|
if rawValue == nil {
|
||||||
|
return nil, fmt.Errorf("missing port: %v", rawMap)
|
||||||
|
}
|
||||||
|
switch v := rawValue.(type) {
|
||||||
|
case int:
|
||||||
|
port = v
|
||||||
|
case string:
|
||||||
|
port, err = strconv.Atoi(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid port: %s: %w", v, err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
return newCalculatedRemote(ipNet, port)
|
||||||
|
}
|
||||||
27
calculated_remote_test.go
Normal file
27
calculated_remote_test.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCalculatedRemoteApply(t *testing.T) {
|
||||||
|
_, ipNet, err := net.ParseCIDR("192.168.1.0/24")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
c, err := newCalculatedRemote(ipNet, 4242)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182})
|
||||||
|
|
||||||
|
expected := &Ip4AndPort{
|
||||||
|
Ip: uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})),
|
||||||
|
Port: 4242,
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, expected, c.Apply(input))
|
||||||
|
}
|
||||||
4
cert.go
4
cert.go
@@ -66,7 +66,7 @@ func NewCertStateFromConfig(c *config.C) (*CertState, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
rawKey, _, err := cert.UnmarshalX25519PrivateKey(pemPrivateKey)
|
rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
|
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
|
||||||
}
|
}
|
||||||
@@ -102,7 +102,7 @@ func NewCertStateFromConfig(c *config.C) (*CertState, error) {
|
|||||||
return nil, fmt.Errorf("no IPs encoded in certificate")
|
return nil, fmt.Errorf("no IPs encoded in certificate")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = nebulaCert.VerifyPrivateKey(rawKey); err != nil {
|
if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
|
||||||
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
|
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
10
cert/ca.go
10
cert/ca.go
@@ -91,9 +91,15 @@ func (ncp *NebulaCAPool) ResetCertBlocklist() {
|
|||||||
ncp.certBlocklist = make(map[string]struct{})
|
ncp.certBlocklist = make(map[string]struct{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
|
// NOTE: This uses an internal cache for Sha256Sum() that will not be invalidated
|
||||||
|
// automatically if you manually change any fields in the NebulaCertificate.
|
||||||
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
|
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
|
||||||
h, err := c.Sha256Sum()
|
return ncp.isBlocklistedWithCache(c, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
|
||||||
|
func (ncp *NebulaCAPool) isBlocklistedWithCache(c *NebulaCertificate, useCache bool) bool {
|
||||||
|
h, err := c.sha256SumWithCache(useCache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
477
cert/cert.go
477
cert/cert.go
@@ -2,35 +2,55 @@ package cert
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto"
|
"crypto/ecdh"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/ed25519"
|
||||||
|
"crypto/elliptic"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
"net"
|
"net"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/crypto/curve25519"
|
"golang.org/x/crypto/curve25519"
|
||||||
"golang.org/x/crypto/ed25519"
|
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
)
|
)
|
||||||
|
|
||||||
const publicKeyLen = 32
|
const publicKeyLen = 32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
CertBanner = "NEBULA CERTIFICATE"
|
CertBanner = "NEBULA CERTIFICATE"
|
||||||
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
|
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
|
||||||
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
|
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
|
||||||
Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
|
EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
|
||||||
Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
|
Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
|
||||||
|
Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
|
||||||
|
|
||||||
|
P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
|
||||||
|
P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
|
||||||
|
EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
|
||||||
|
ECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 PRIVATE KEY"
|
||||||
)
|
)
|
||||||
|
|
||||||
type NebulaCertificate struct {
|
type NebulaCertificate struct {
|
||||||
Details NebulaCertificateDetails
|
Details NebulaCertificateDetails
|
||||||
Signature []byte
|
Signature []byte
|
||||||
|
|
||||||
|
// the cached hex string of the calculated sha256sum
|
||||||
|
// for VerifyWithCache
|
||||||
|
sha256sum atomic.Pointer[string]
|
||||||
|
|
||||||
|
// the cached public key bytes if they were verified as the signer
|
||||||
|
// for VerifyWithCache
|
||||||
|
signatureVerified atomic.Pointer[[]byte]
|
||||||
}
|
}
|
||||||
|
|
||||||
type NebulaCertificateDetails struct {
|
type NebulaCertificateDetails struct {
|
||||||
@@ -46,10 +66,25 @@ type NebulaCertificateDetails struct {
|
|||||||
|
|
||||||
// Map of groups for faster lookup
|
// Map of groups for faster lookup
|
||||||
InvertedGroups map[string]struct{}
|
InvertedGroups map[string]struct{}
|
||||||
|
|
||||||
|
Curve Curve
|
||||||
|
}
|
||||||
|
|
||||||
|
type NebulaEncryptedData struct {
|
||||||
|
EncryptionMetadata NebulaEncryptionMetadata
|
||||||
|
Ciphertext []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type NebulaEncryptionMetadata struct {
|
||||||
|
EncryptionAlgorithm string
|
||||||
|
Argon2Parameters Argon2Parameters
|
||||||
}
|
}
|
||||||
|
|
||||||
type m map[string]interface{}
|
type m map[string]interface{}
|
||||||
|
|
||||||
|
// Returned if we try to unmarshal an encrypted private key without a passphrase
|
||||||
|
var ErrPrivateKeyEncrypted = errors.New("private key must be decrypted")
|
||||||
|
|
||||||
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert
|
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert
|
||||||
func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
||||||
if len(b) == 0 {
|
if len(b) == 0 {
|
||||||
@@ -84,6 +119,7 @@ func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
|||||||
PublicKey: make([]byte, len(rc.Details.PublicKey)),
|
PublicKey: make([]byte, len(rc.Details.PublicKey)),
|
||||||
IsCA: rc.Details.IsCA,
|
IsCA: rc.Details.IsCA,
|
||||||
InvertedGroups: make(map[string]struct{}),
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
Curve: rc.Details.Curve,
|
||||||
},
|
},
|
||||||
Signature: make([]byte, len(rc.Signature)),
|
Signature: make([]byte, len(rc.Signature)),
|
||||||
}
|
}
|
||||||
@@ -134,6 +170,28 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
|
|||||||
return nc, r, err
|
return nc, r, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func MarshalPrivateKey(curve Curve, b []byte) []byte {
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: P256PrivateKeyBanner, Bytes: b})
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func MarshalSigningPrivateKey(curve Curve, b []byte) []byte {
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: b})
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: ECDSAP256PrivateKeyBanner, Bytes: b})
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key
|
// MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key
|
||||||
func MarshalX25519PrivateKey(b []byte) []byte {
|
func MarshalX25519PrivateKey(b []byte) []byte {
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
||||||
@@ -144,6 +202,87 @@ func MarshalEd25519PrivateKey(key ed25519.PrivateKey) []byte {
|
|||||||
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key})
|
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func UnmarshalPrivateKey(b []byte) ([]byte, []byte, Curve, error) {
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
var expectedLen int
|
||||||
|
var curve Curve
|
||||||
|
switch k.Type {
|
||||||
|
case X25519PrivateKeyBanner:
|
||||||
|
expectedLen = 32
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
case P256PrivateKeyBanner:
|
||||||
|
expectedLen = 32
|
||||||
|
curve = Curve_P256
|
||||||
|
default:
|
||||||
|
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula private key banner")
|
||||||
|
}
|
||||||
|
if len(k.Bytes) != expectedLen {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s private key", expectedLen, curve)
|
||||||
|
}
|
||||||
|
return k.Bytes, r, curve, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func UnmarshalSigningPrivateKey(b []byte) ([]byte, []byte, Curve, error) {
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
var curve Curve
|
||||||
|
switch k.Type {
|
||||||
|
case EncryptedEd25519PrivateKeyBanner:
|
||||||
|
return nil, nil, Curve_CURVE25519, ErrPrivateKeyEncrypted
|
||||||
|
case EncryptedECDSAP256PrivateKeyBanner:
|
||||||
|
return nil, nil, Curve_P256, ErrPrivateKeyEncrypted
|
||||||
|
case Ed25519PrivateKeyBanner:
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
if len(k.Bytes) != ed25519.PrivateKeySize {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid Ed25519 private key", ed25519.PrivateKeySize)
|
||||||
|
}
|
||||||
|
case ECDSAP256PrivateKeyBanner:
|
||||||
|
curve = Curve_P256
|
||||||
|
if len(k.Bytes) != 32 {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula Ed25519/ECDSA private key banner")
|
||||||
|
}
|
||||||
|
return k.Bytes, r, curve, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncryptAndMarshalSigningPrivateKey is a simple helper to encrypt and PEM encode a private key
|
||||||
|
func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte, kdfParams *Argon2Parameters) ([]byte, error) {
|
||||||
|
ciphertext, err := aes256Encrypt(passphrase, kdfParams, b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err = proto.Marshal(&RawNebulaEncryptedData{
|
||||||
|
EncryptionMetadata: &RawNebulaEncryptionMetadata{
|
||||||
|
EncryptionAlgorithm: "AES-256-GCM",
|
||||||
|
Argon2Parameters: &RawNebulaArgon2Parameters{
|
||||||
|
Version: kdfParams.version,
|
||||||
|
Memory: kdfParams.Memory,
|
||||||
|
Parallelism: uint32(kdfParams.Parallelism),
|
||||||
|
Iterations: kdfParams.Iterations,
|
||||||
|
Salt: kdfParams.salt,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Ciphertext: ciphertext,
|
||||||
|
})
|
||||||
|
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: EncryptedEd25519PrivateKeyBanner, Bytes: b}), nil
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: EncryptedECDSAP256PrivateKeyBanner, Bytes: b}), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("invalid curve: %v", curve)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b
|
// UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b
|
||||||
// or an error on failure
|
// or an error on failure
|
||||||
func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) {
|
func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) {
|
||||||
@@ -168,9 +307,13 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
|
|||||||
if k == nil {
|
if k == nil {
|
||||||
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
}
|
}
|
||||||
if k.Type != Ed25519PrivateKeyBanner {
|
|
||||||
|
if k.Type == EncryptedEd25519PrivateKeyBanner {
|
||||||
|
return nil, r, ErrPrivateKeyEncrypted
|
||||||
|
} else if k.Type != Ed25519PrivateKeyBanner {
|
||||||
return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner")
|
return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(k.Bytes) != ed25519.PrivateKeySize {
|
if len(k.Bytes) != ed25519.PrivateKeySize {
|
||||||
return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
||||||
}
|
}
|
||||||
@@ -178,6 +321,126 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
|
|||||||
return k.Bytes, r, nil
|
return k.Bytes, r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert into its
|
||||||
|
// protobuf-generated struct.
|
||||||
|
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
|
||||||
|
if len(b) == 0 {
|
||||||
|
return nil, fmt.Errorf("nil byte array")
|
||||||
|
}
|
||||||
|
var rned RawNebulaEncryptedData
|
||||||
|
err := proto.Unmarshal(b, &rned)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if rned.EncryptionMetadata == nil {
|
||||||
|
return nil, fmt.Errorf("encoded EncryptionMetadata was nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if rned.EncryptionMetadata.Argon2Parameters == nil {
|
||||||
|
return nil, fmt.Errorf("encoded Argon2Parameters was nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
params, err := unmarshalArgon2Parameters(rned.EncryptionMetadata.Argon2Parameters)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ned := NebulaEncryptedData{
|
||||||
|
EncryptionMetadata: NebulaEncryptionMetadata{
|
||||||
|
EncryptionAlgorithm: rned.EncryptionMetadata.EncryptionAlgorithm,
|
||||||
|
Argon2Parameters: *params,
|
||||||
|
},
|
||||||
|
Ciphertext: rned.Ciphertext,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ned, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) {
|
||||||
|
if params.Version < math.MinInt32 || params.Version > math.MaxInt32 {
|
||||||
|
return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32)
|
||||||
|
}
|
||||||
|
if params.Memory <= 0 || params.Memory > math.MaxUint32 {
|
||||||
|
return nil, fmt.Errorf("Argon2Parameters Memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
|
||||||
|
}
|
||||||
|
if params.Parallelism <= 0 || params.Parallelism > math.MaxUint8 {
|
||||||
|
return nil, fmt.Errorf("Argon2Parameters Parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
|
||||||
|
}
|
||||||
|
if params.Iterations <= 0 || params.Iterations > math.MaxUint32 {
|
||||||
|
return nil, fmt.Errorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Argon2Parameters{
|
||||||
|
version: rune(params.Version),
|
||||||
|
Memory: uint32(params.Memory),
|
||||||
|
Parallelism: uint8(params.Parallelism),
|
||||||
|
Iterations: uint32(params.Iterations),
|
||||||
|
salt: params.Salt,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecryptAndUnmarshalSigningPrivateKey will try to pem decode and decrypt an Ed25519/ECDSA private key with
|
||||||
|
// the given passphrase, returning any other bytes b or an error on failure
|
||||||
|
func DecryptAndUnmarshalSigningPrivateKey(passphrase, b []byte) (Curve, []byte, []byte, error) {
|
||||||
|
var curve Curve
|
||||||
|
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return curve, nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch k.Type {
|
||||||
|
case EncryptedEd25519PrivateKeyBanner:
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
case EncryptedECDSAP256PrivateKeyBanner:
|
||||||
|
curve = Curve_P256
|
||||||
|
default:
|
||||||
|
return curve, nil, r, fmt.Errorf("bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
|
||||||
|
}
|
||||||
|
|
||||||
|
ned, err := UnmarshalNebulaEncryptedData(k.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return curve, nil, r, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var bytes []byte
|
||||||
|
switch ned.EncryptionMetadata.EncryptionAlgorithm {
|
||||||
|
case "AES-256-GCM":
|
||||||
|
bytes, err = aes256Decrypt(passphrase, &ned.EncryptionMetadata.Argon2Parameters, ned.Ciphertext)
|
||||||
|
if err != nil {
|
||||||
|
return curve, nil, r, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return curve, nil, r, fmt.Errorf("unsupported encryption algorithm: %s", ned.EncryptionMetadata.EncryptionAlgorithm)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
if len(bytes) != ed25519.PrivateKeySize {
|
||||||
|
return curve, nil, r, fmt.Errorf("key was not %d bytes, is invalid ed25519 private key", ed25519.PrivateKeySize)
|
||||||
|
}
|
||||||
|
case Curve_P256:
|
||||||
|
if len(bytes) != 32 {
|
||||||
|
return curve, nil, r, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return curve, bytes, r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func MarshalPublicKey(curve Curve, b []byte) []byte {
|
||||||
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
||||||
|
case Curve_P256:
|
||||||
|
return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key
|
// MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key
|
||||||
func MarshalX25519PublicKey(b []byte) []byte {
|
func MarshalX25519PublicKey(b []byte) []byte {
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
||||||
@@ -188,6 +451,30 @@ func MarshalEd25519PublicKey(key ed25519.PublicKey) []byte {
|
|||||||
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key})
|
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func UnmarshalPublicKey(b []byte) ([]byte, []byte, Curve, error) {
|
||||||
|
k, r := pem.Decode(b)
|
||||||
|
if k == nil {
|
||||||
|
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
var expectedLen int
|
||||||
|
var curve Curve
|
||||||
|
switch k.Type {
|
||||||
|
case X25519PublicKeyBanner:
|
||||||
|
expectedLen = 32
|
||||||
|
curve = Curve_CURVE25519
|
||||||
|
case P256PublicKeyBanner:
|
||||||
|
// Uncompressed
|
||||||
|
expectedLen = 65
|
||||||
|
curve = Curve_P256
|
||||||
|
default:
|
||||||
|
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula public key banner")
|
||||||
|
}
|
||||||
|
if len(k.Bytes) != expectedLen {
|
||||||
|
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s public key", expectedLen, curve)
|
||||||
|
}
|
||||||
|
return k.Bytes, r, curve, nil
|
||||||
|
}
|
||||||
|
|
||||||
// UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b
|
// UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b
|
||||||
// or an error on failure
|
// or an error on failure
|
||||||
func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) {
|
func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) {
|
||||||
@@ -223,27 +510,86 @@ func UnmarshalEd25519PublicKey(b []byte) (ed25519.PublicKey, []byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sign signs a nebula cert with the provided private key
|
// Sign signs a nebula cert with the provided private key
|
||||||
func (nc *NebulaCertificate) Sign(key ed25519.PrivateKey) error {
|
func (nc *NebulaCertificate) Sign(curve Curve, key []byte) error {
|
||||||
|
if curve != nc.Details.Curve {
|
||||||
|
return fmt.Errorf("curve in cert and private key supplied don't match")
|
||||||
|
}
|
||||||
|
|
||||||
b, err := proto.Marshal(nc.getRawDetails())
|
b, err := proto.Marshal(nc.getRawDetails())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
sig, err := key.Sign(rand.Reader, b, crypto.Hash(0))
|
var sig []byte
|
||||||
if err != nil {
|
|
||||||
return err
|
switch curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
signer := ed25519.PrivateKey(key)
|
||||||
|
sig = ed25519.Sign(signer, b)
|
||||||
|
case Curve_P256:
|
||||||
|
signer := &ecdsa.PrivateKey{
|
||||||
|
PublicKey: ecdsa.PublicKey{
|
||||||
|
Curve: elliptic.P256(),
|
||||||
|
},
|
||||||
|
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
|
||||||
|
D: new(big.Int).SetBytes(key),
|
||||||
|
}
|
||||||
|
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
|
||||||
|
signer.X, signer.Y = signer.Curve.ScalarBaseMult(key)
|
||||||
|
|
||||||
|
// We need to hash first for ECDSA
|
||||||
|
// - https://pkg.go.dev/crypto/ecdsa#SignASN1
|
||||||
|
hashed := sha256.Sum256(b)
|
||||||
|
sig, err = ecdsa.SignASN1(rand.Reader, signer, hashed[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid curve: %s", nc.Details.Curve)
|
||||||
}
|
}
|
||||||
|
|
||||||
nc.Signature = sig
|
nc.Signature = sig
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckSignature verifies the signature against the provided public key
|
// CheckSignature verifies the signature against the provided public key
|
||||||
func (nc *NebulaCertificate) CheckSignature(key ed25519.PublicKey) bool {
|
func (nc *NebulaCertificate) CheckSignature(key []byte) bool {
|
||||||
b, err := proto.Marshal(nc.getRawDetails())
|
b, err := proto.Marshal(nc.getRawDetails())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return ed25519.Verify(key, b, nc.Signature)
|
switch nc.Details.Curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
return ed25519.Verify(ed25519.PublicKey(key), b, nc.Signature)
|
||||||
|
case Curve_P256:
|
||||||
|
x, y := elliptic.Unmarshal(elliptic.P256(), key)
|
||||||
|
pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
|
||||||
|
hashed := sha256.Sum256(b)
|
||||||
|
return ecdsa.VerifyASN1(pubKey, hashed[:], nc.Signature)
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: This uses an internal cache that will not be invalidated automatically
|
||||||
|
// if you manually change any fields in the NebulaCertificate.
|
||||||
|
func (nc *NebulaCertificate) checkSignatureWithCache(key []byte, useCache bool) bool {
|
||||||
|
if !useCache {
|
||||||
|
return nc.CheckSignature(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := nc.signatureVerified.Load(); v != nil {
|
||||||
|
return bytes.Equal(*v, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
verified := nc.CheckSignature(key)
|
||||||
|
if verified {
|
||||||
|
keyCopy := make([]byte, len(key))
|
||||||
|
copy(keyCopy, key)
|
||||||
|
nc.signatureVerified.Store(&keyCopy)
|
||||||
|
}
|
||||||
|
|
||||||
|
return verified
|
||||||
}
|
}
|
||||||
|
|
||||||
// Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false
|
// Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false
|
||||||
@@ -253,8 +599,27 @@ func (nc *NebulaCertificate) Expired(t time.Time) bool {
|
|||||||
|
|
||||||
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
||||||
if ncp.IsBlocklisted(nc) {
|
return nc.verify(t, ncp, false)
|
||||||
return false, fmt.Errorf("certificate has been blocked")
|
}
|
||||||
|
|
||||||
|
// VerifyWithCache will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
|
//
|
||||||
|
// NOTE: This uses an internal cache that will not be invalidated automatically
|
||||||
|
// if you manually change any fields in the NebulaCertificate.
|
||||||
|
func (nc *NebulaCertificate) VerifyWithCache(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
||||||
|
return nc.verify(t, ncp, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResetCache resets the cache used by VerifyWithCache.
|
||||||
|
func (nc *NebulaCertificate) ResetCache() {
|
||||||
|
nc.sha256sum.Store(nil)
|
||||||
|
nc.signatureVerified.Store(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
|
func (nc *NebulaCertificate) verify(t time.Time, ncp *NebulaCAPool, useCache bool) (bool, error) {
|
||||||
|
if ncp.isBlocklistedWithCache(nc, useCache) {
|
||||||
|
return false, ErrBlockListed
|
||||||
}
|
}
|
||||||
|
|
||||||
signer, err := ncp.GetCAForCert(nc)
|
signer, err := ncp.GetCAForCert(nc)
|
||||||
@@ -263,15 +628,15 @@ func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error
|
|||||||
}
|
}
|
||||||
|
|
||||||
if signer.Expired(t) {
|
if signer.Expired(t) {
|
||||||
return false, fmt.Errorf("root certificate is expired")
|
return false, ErrRootExpired
|
||||||
}
|
}
|
||||||
|
|
||||||
if nc.Expired(t) {
|
if nc.Expired(t) {
|
||||||
return false, fmt.Errorf("certificate is expired")
|
return false, ErrExpired
|
||||||
}
|
}
|
||||||
|
|
||||||
if !nc.CheckSignature(signer.Details.PublicKey) {
|
if !nc.checkSignatureWithCache(signer.Details.PublicKey, useCache) {
|
||||||
return false, fmt.Errorf("certificate signature did not match")
|
return false, ErrSignatureMismatch
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := nc.CheckRootConstrains(signer); err != nil {
|
if err := nc.CheckRootConstrains(signer); err != nil {
|
||||||
@@ -324,22 +689,52 @@ func (nc *NebulaCertificate) CheckRootConstrains(signer *NebulaCertificate) erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
|
// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
|
||||||
func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
|
func (nc *NebulaCertificate) VerifyPrivateKey(curve Curve, key []byte) error {
|
||||||
|
if curve != nc.Details.Curve {
|
||||||
|
return fmt.Errorf("curve in cert and private key supplied don't match")
|
||||||
|
}
|
||||||
if nc.Details.IsCA {
|
if nc.Details.IsCA {
|
||||||
// the call to PublicKey below will panic slice bounds out of range otherwise
|
switch curve {
|
||||||
if len(key) != ed25519.PrivateKeySize {
|
case Curve_CURVE25519:
|
||||||
return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
// the call to PublicKey below will panic slice bounds out of range otherwise
|
||||||
}
|
if len(key) != ed25519.PrivateKeySize {
|
||||||
|
return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
||||||
|
}
|
||||||
|
|
||||||
if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
|
if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
|
||||||
return fmt.Errorf("public key in cert and private key supplied don't match")
|
return fmt.Errorf("public key in cert and private key supplied don't match")
|
||||||
|
}
|
||||||
|
case Curve_P256:
|
||||||
|
privkey, err := ecdh.P256().NewPrivateKey(key)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot parse private key as P256")
|
||||||
|
}
|
||||||
|
pub := privkey.PublicKey().Bytes()
|
||||||
|
if !bytes.Equal(pub, nc.Details.PublicKey) {
|
||||||
|
return fmt.Errorf("public key in cert and private key supplied don't match")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid curve: %s", curve)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, err := curve25519.X25519(key, curve25519.Basepoint)
|
var pub []byte
|
||||||
if err != nil {
|
switch curve {
|
||||||
return err
|
case Curve_CURVE25519:
|
||||||
|
var err error
|
||||||
|
pub, err = curve25519.X25519(key, curve25519.Basepoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case Curve_P256:
|
||||||
|
privkey, err := ecdh.P256().NewPrivateKey(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pub = privkey.PublicKey().Bytes()
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid curve: %s", curve)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(pub, nc.Details.PublicKey) {
|
if !bytes.Equal(pub, nc.Details.PublicKey) {
|
||||||
return fmt.Errorf("public key in cert and private key supplied don't match")
|
return fmt.Errorf("public key in cert and private key supplied don't match")
|
||||||
@@ -393,6 +788,7 @@ func (nc *NebulaCertificate) String() string {
|
|||||||
s += fmt.Sprintf("\t\tIs CA: %v\n", nc.Details.IsCA)
|
s += fmt.Sprintf("\t\tIs CA: %v\n", nc.Details.IsCA)
|
||||||
s += fmt.Sprintf("\t\tIssuer: %s\n", nc.Details.Issuer)
|
s += fmt.Sprintf("\t\tIssuer: %s\n", nc.Details.Issuer)
|
||||||
s += fmt.Sprintf("\t\tPublic key: %x\n", nc.Details.PublicKey)
|
s += fmt.Sprintf("\t\tPublic key: %x\n", nc.Details.PublicKey)
|
||||||
|
s += fmt.Sprintf("\t\tCurve: %s\n", nc.Details.Curve)
|
||||||
s += "\t}\n"
|
s += "\t}\n"
|
||||||
fp, err := nc.Sha256Sum()
|
fp, err := nc.Sha256Sum()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -413,6 +809,7 @@ func (nc *NebulaCertificate) getRawDetails() *RawNebulaCertificateDetails {
|
|||||||
NotAfter: nc.Details.NotAfter.Unix(),
|
NotAfter: nc.Details.NotAfter.Unix(),
|
||||||
PublicKey: make([]byte, len(nc.Details.PublicKey)),
|
PublicKey: make([]byte, len(nc.Details.PublicKey)),
|
||||||
IsCA: nc.Details.IsCA,
|
IsCA: nc.Details.IsCA,
|
||||||
|
Curve: nc.Details.Curve,
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ipNet := range nc.Details.Ips {
|
for _, ipNet := range nc.Details.Ips {
|
||||||
@@ -461,6 +858,25 @@ func (nc *NebulaCertificate) Sha256Sum() (string, error) {
|
|||||||
return hex.EncodeToString(sum[:]), nil
|
return hex.EncodeToString(sum[:]), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NOTE: This uses an internal cache that will not be invalidated automatically
|
||||||
|
// if you manually change any fields in the NebulaCertificate.
|
||||||
|
func (nc *NebulaCertificate) sha256SumWithCache(useCache bool) (string, error) {
|
||||||
|
if !useCache {
|
||||||
|
return nc.Sha256Sum()
|
||||||
|
}
|
||||||
|
|
||||||
|
if s := nc.sha256sum.Load(); s != nil {
|
||||||
|
return *s, nil
|
||||||
|
}
|
||||||
|
s, err := nc.Sha256Sum()
|
||||||
|
if err != nil {
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
|
||||||
|
nc.sha256sum.Store(&s)
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
|
func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
|
||||||
toString := func(ips []*net.IPNet) []string {
|
toString := func(ips []*net.IPNet) []string {
|
||||||
s := []string{}
|
s := []string{}
|
||||||
@@ -482,6 +898,7 @@ func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
|
|||||||
"publicKey": fmt.Sprintf("%x", nc.Details.PublicKey),
|
"publicKey": fmt.Sprintf("%x", nc.Details.PublicKey),
|
||||||
"isCa": nc.Details.IsCA,
|
"isCa": nc.Details.IsCA,
|
||||||
"issuer": nc.Details.Issuer,
|
"issuer": nc.Details.Issuer,
|
||||||
|
"curve": nc.Details.Curve.String(),
|
||||||
},
|
},
|
||||||
"fingerprint": fp,
|
"fingerprint": fp,
|
||||||
"signature": fmt.Sprintf("%x", nc.Signature),
|
"signature": fmt.Sprintf("%x", nc.Signature),
|
||||||
|
|||||||
356
cert/cert.pb.go
356
cert/cert.pb.go
@@ -1,7 +1,7 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// versions:
|
// versions:
|
||||||
// protoc-gen-go v1.28.0
|
// protoc-gen-go v1.30.0
|
||||||
// protoc v3.20.0
|
// protoc v3.21.5
|
||||||
// source: cert.proto
|
// source: cert.proto
|
||||||
|
|
||||||
package cert
|
package cert
|
||||||
@@ -20,6 +20,52 @@ const (
|
|||||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type Curve int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
Curve_CURVE25519 Curve = 0
|
||||||
|
Curve_P256 Curve = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// Enum value maps for Curve.
|
||||||
|
var (
|
||||||
|
Curve_name = map[int32]string{
|
||||||
|
0: "CURVE25519",
|
||||||
|
1: "P256",
|
||||||
|
}
|
||||||
|
Curve_value = map[string]int32{
|
||||||
|
"CURVE25519": 0,
|
||||||
|
"P256": 1,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (x Curve) Enum() *Curve {
|
||||||
|
p := new(Curve)
|
||||||
|
*p = x
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Curve) String() string {
|
||||||
|
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (Curve) Descriptor() protoreflect.EnumDescriptor {
|
||||||
|
return file_cert_proto_enumTypes[0].Descriptor()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (Curve) Type() protoreflect.EnumType {
|
||||||
|
return &file_cert_proto_enumTypes[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Curve) Number() protoreflect.EnumNumber {
|
||||||
|
return protoreflect.EnumNumber(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use Curve.Descriptor instead.
|
||||||
|
func (Curve) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
type RawNebulaCertificate struct {
|
type RawNebulaCertificate struct {
|
||||||
state protoimpl.MessageState
|
state protoimpl.MessageState
|
||||||
sizeCache protoimpl.SizeCache
|
sizeCache protoimpl.SizeCache
|
||||||
@@ -91,6 +137,7 @@ type RawNebulaCertificateDetails struct {
|
|||||||
IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"`
|
IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"`
|
||||||
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
|
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
|
||||||
Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"`
|
Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"`
|
||||||
|
Curve Curve `protobuf:"varint,100,opt,name=curve,proto3,enum=cert.Curve" json:"curve,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *RawNebulaCertificateDetails) Reset() {
|
func (x *RawNebulaCertificateDetails) Reset() {
|
||||||
@@ -188,6 +235,202 @@ func (x *RawNebulaCertificateDetails) GetIssuer() []byte {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificateDetails) GetCurve() Curve {
|
||||||
|
if x != nil {
|
||||||
|
return x.Curve
|
||||||
|
}
|
||||||
|
return Curve_CURVE25519
|
||||||
|
}
|
||||||
|
|
||||||
|
type RawNebulaEncryptedData struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
EncryptionMetadata *RawNebulaEncryptionMetadata `protobuf:"bytes,1,opt,name=EncryptionMetadata,proto3" json:"EncryptionMetadata,omitempty"`
|
||||||
|
Ciphertext []byte `protobuf:"bytes,2,opt,name=Ciphertext,proto3" json:"Ciphertext,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) Reset() {
|
||||||
|
*x = RawNebulaEncryptedData{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[2]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RawNebulaEncryptedData) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[2]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaEncryptedData.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RawNebulaEncryptedData) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) GetEncryptionMetadata() *RawNebulaEncryptionMetadata {
|
||||||
|
if x != nil {
|
||||||
|
return x.EncryptionMetadata
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptedData) GetCiphertext() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.Ciphertext
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type RawNebulaEncryptionMetadata struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=EncryptionAlgorithm,proto3" json:"EncryptionAlgorithm,omitempty"`
|
||||||
|
Argon2Parameters *RawNebulaArgon2Parameters `protobuf:"bytes,2,opt,name=Argon2Parameters,proto3" json:"Argon2Parameters,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) Reset() {
|
||||||
|
*x = RawNebulaEncryptionMetadata{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[3]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RawNebulaEncryptionMetadata) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[3]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaEncryptionMetadata.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RawNebulaEncryptionMetadata) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{3}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) GetEncryptionAlgorithm() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.EncryptionAlgorithm
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaEncryptionMetadata) GetArgon2Parameters() *RawNebulaArgon2Parameters {
|
||||||
|
if x != nil {
|
||||||
|
return x.Argon2Parameters
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type RawNebulaArgon2Parameters struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // rune in Go
|
||||||
|
Memory uint32 `protobuf:"varint,2,opt,name=memory,proto3" json:"memory,omitempty"`
|
||||||
|
Parallelism uint32 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"` // uint8 in Go
|
||||||
|
Iterations uint32 `protobuf:"varint,3,opt,name=iterations,proto3" json:"iterations,omitempty"`
|
||||||
|
Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) Reset() {
|
||||||
|
*x = RawNebulaArgon2Parameters{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[4]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RawNebulaArgon2Parameters) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[4]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaArgon2Parameters.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RawNebulaArgon2Parameters) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cert_proto_rawDescGZIP(), []int{4}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetVersion() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Version
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetMemory() uint32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Memory
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetParallelism() uint32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Parallelism
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetIterations() uint32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Iterations
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaArgon2Parameters) GetSalt() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.Salt
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
var File_cert_proto protoreflect.FileDescriptor
|
var File_cert_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
var file_cert_proto_rawDesc = []byte{
|
var file_cert_proto_rawDesc = []byte{
|
||||||
@@ -199,7 +442,7 @@ var file_cert_proto_rawDesc = []byte{
|
|||||||
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
|
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
|
||||||
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
|
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
|
||||||
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
|
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
|
||||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
||||||
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
|
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
|
||||||
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
||||||
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
|
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
|
||||||
@@ -215,9 +458,43 @@ var file_cert_proto_rawDesc = []byte{
|
|||||||
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
|
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
|
||||||
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
|
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
|
||||||
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
|
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
|
||||||
0x72, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
0x72, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0e,
|
||||||
0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71, 0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63,
|
0x32, 0x0b, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x75, 0x72, 0x76, 0x65, 0x52, 0x05, 0x63,
|
||||||
0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
0x75, 0x72, 0x76, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
|
||||||
|
0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
|
||||||
|
0x51, 0x0a, 0x12, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74,
|
||||||
|
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
|
||||||
|
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72,
|
||||||
|
0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12,
|
||||||
|
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||||
|
0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74,
|
||||||
|
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
|
||||||
|
0x78, 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61,
|
||||||
|
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||||
|
0x74, 0x61, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||||
|
0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
|
||||||
|
0x69, 0x74, 0x68, 0x6d, 0x12, 0x4b, 0x0a, 0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61,
|
||||||
|
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
|
||||||
|
0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
|
||||||
|
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52,
|
||||||
|
0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
|
||||||
|
0x73, 0x22, 0xa3, 0x01, 0x0a, 0x19, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
|
||||||
|
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12,
|
||||||
|
0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||||
|
0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d,
|
||||||
|
0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72,
|
||||||
|
0x79, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d,
|
||||||
|
0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c,
|
||||||
|
0x69, 0x73, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||||
|
0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||||
|
0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x2a, 0x21, 0x0a, 0x05, 0x43, 0x75, 0x72, 0x76, 0x65,
|
||||||
|
0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x55, 0x52, 0x56, 0x45, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x00,
|
||||||
|
0x12, 0x08, 0x0a, 0x04, 0x50, 0x32, 0x35, 0x36, 0x10, 0x01, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69,
|
||||||
|
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71,
|
||||||
|
0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72,
|
||||||
|
0x6f, 0x74, 0x6f, 0x33,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -232,18 +509,26 @@ func file_cert_proto_rawDescGZIP() []byte {
|
|||||||
return file_cert_proto_rawDescData
|
return file_cert_proto_rawDescData
|
||||||
}
|
}
|
||||||
|
|
||||||
var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
var file_cert_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||||
|
var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||||
var file_cert_proto_goTypes = []interface{}{
|
var file_cert_proto_goTypes = []interface{}{
|
||||||
(*RawNebulaCertificate)(nil), // 0: cert.RawNebulaCertificate
|
(Curve)(0), // 0: cert.Curve
|
||||||
(*RawNebulaCertificateDetails)(nil), // 1: cert.RawNebulaCertificateDetails
|
(*RawNebulaCertificate)(nil), // 1: cert.RawNebulaCertificate
|
||||||
|
(*RawNebulaCertificateDetails)(nil), // 2: cert.RawNebulaCertificateDetails
|
||||||
|
(*RawNebulaEncryptedData)(nil), // 3: cert.RawNebulaEncryptedData
|
||||||
|
(*RawNebulaEncryptionMetadata)(nil), // 4: cert.RawNebulaEncryptionMetadata
|
||||||
|
(*RawNebulaArgon2Parameters)(nil), // 5: cert.RawNebulaArgon2Parameters
|
||||||
}
|
}
|
||||||
var file_cert_proto_depIdxs = []int32{
|
var file_cert_proto_depIdxs = []int32{
|
||||||
1, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
|
2, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
|
||||||
1, // [1:1] is the sub-list for method output_type
|
0, // 1: cert.RawNebulaCertificateDetails.curve:type_name -> cert.Curve
|
||||||
1, // [1:1] is the sub-list for method input_type
|
4, // 2: cert.RawNebulaEncryptedData.EncryptionMetadata:type_name -> cert.RawNebulaEncryptionMetadata
|
||||||
1, // [1:1] is the sub-list for extension type_name
|
5, // 3: cert.RawNebulaEncryptionMetadata.Argon2Parameters:type_name -> cert.RawNebulaArgon2Parameters
|
||||||
1, // [1:1] is the sub-list for extension extendee
|
4, // [4:4] is the sub-list for method output_type
|
||||||
0, // [0:1] is the sub-list for field type_name
|
4, // [4:4] is the sub-list for method input_type
|
||||||
|
4, // [4:4] is the sub-list for extension type_name
|
||||||
|
4, // [4:4] is the sub-list for extension extendee
|
||||||
|
0, // [0:4] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { file_cert_proto_init() }
|
func init() { file_cert_proto_init() }
|
||||||
@@ -276,19 +561,56 @@ func file_cert_proto_init() {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
file_cert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaEncryptedData); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_cert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaEncryptionMetadata); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_cert_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaArgon2Parameters); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
type x struct{}
|
type x struct{}
|
||||||
out := protoimpl.TypeBuilder{
|
out := protoimpl.TypeBuilder{
|
||||||
File: protoimpl.DescBuilder{
|
File: protoimpl.DescBuilder{
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
RawDescriptor: file_cert_proto_rawDesc,
|
RawDescriptor: file_cert_proto_rawDesc,
|
||||||
NumEnums: 0,
|
NumEnums: 1,
|
||||||
NumMessages: 2,
|
NumMessages: 5,
|
||||||
NumExtensions: 0,
|
NumExtensions: 0,
|
||||||
NumServices: 0,
|
NumServices: 0,
|
||||||
},
|
},
|
||||||
GoTypes: file_cert_proto_goTypes,
|
GoTypes: file_cert_proto_goTypes,
|
||||||
DependencyIndexes: file_cert_proto_depIdxs,
|
DependencyIndexes: file_cert_proto_depIdxs,
|
||||||
|
EnumInfos: file_cert_proto_enumTypes,
|
||||||
MessageInfos: file_cert_proto_msgTypes,
|
MessageInfos: file_cert_proto_msgTypes,
|
||||||
}.Build()
|
}.Build()
|
||||||
File_cert_proto = out.File
|
File_cert_proto = out.File
|
||||||
|
|||||||
@@ -5,6 +5,11 @@ option go_package = "github.com/slackhq/nebula/cert";
|
|||||||
|
|
||||||
//import "google/protobuf/timestamp.proto";
|
//import "google/protobuf/timestamp.proto";
|
||||||
|
|
||||||
|
enum Curve {
|
||||||
|
CURVE25519 = 0;
|
||||||
|
P256 = 1;
|
||||||
|
}
|
||||||
|
|
||||||
message RawNebulaCertificate {
|
message RawNebulaCertificate {
|
||||||
RawNebulaCertificateDetails Details = 1;
|
RawNebulaCertificateDetails Details = 1;
|
||||||
bytes Signature = 2;
|
bytes Signature = 2;
|
||||||
@@ -26,4 +31,24 @@ message RawNebulaCertificateDetails {
|
|||||||
|
|
||||||
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
|
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
|
||||||
bytes Issuer = 9;
|
bytes Issuer = 9;
|
||||||
}
|
|
||||||
|
Curve curve = 100;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RawNebulaEncryptedData {
|
||||||
|
RawNebulaEncryptionMetadata EncryptionMetadata = 1;
|
||||||
|
bytes Ciphertext = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RawNebulaEncryptionMetadata {
|
||||||
|
string EncryptionAlgorithm = 1;
|
||||||
|
RawNebulaArgon2Parameters Argon2Parameters = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RawNebulaArgon2Parameters {
|
||||||
|
int32 version = 1; // rune in Go
|
||||||
|
uint32 memory = 2;
|
||||||
|
uint32 parallelism = 4; // uint8 in Go
|
||||||
|
uint32 iterations = 3;
|
||||||
|
bytes salt = 5;
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,9 @@
|
|||||||
package cert
|
package cert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/ecdh"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -101,7 +104,49 @@ func TestNebulaCertificate_Sign(t *testing.T) {
|
|||||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.False(t, nc.CheckSignature(pub))
|
assert.False(t, nc.CheckSignature(pub))
|
||||||
assert.Nil(t, nc.Sign(priv))
|
assert.Nil(t, nc.Sign(Curve_CURVE25519, priv))
|
||||||
|
assert.True(t, nc.CheckSignature(pub))
|
||||||
|
|
||||||
|
_, err = nc.Marshal()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
//t.Log("Cert size:", len(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNebulaCertificate_SignP256(t *testing.T) {
|
||||||
|
before := time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
after := time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
pubKey := []byte("01234567890abcedfghij1234567890ab1234567890abcedfghij1234567890ab")
|
||||||
|
|
||||||
|
nc := NebulaCertificate{
|
||||||
|
Details: NebulaCertificateDetails{
|
||||||
|
Name: "testing",
|
||||||
|
Ips: []*net.IPNet{
|
||||||
|
{IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
|
||||||
|
{IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
|
||||||
|
{IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
|
||||||
|
},
|
||||||
|
Subnets: []*net.IPNet{
|
||||||
|
{IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
|
||||||
|
{IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
|
||||||
|
{IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
|
||||||
|
},
|
||||||
|
Groups: []string{"test-group1", "test-group2", "test-group3"},
|
||||||
|
NotBefore: before,
|
||||||
|
NotAfter: after,
|
||||||
|
PublicKey: pubKey,
|
||||||
|
IsCA: false,
|
||||||
|
Curve: Curve_P256,
|
||||||
|
Issuer: "1234567890abcedfghij1234567890ab",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||||
|
pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
|
||||||
|
rawPriv := priv.D.FillBytes(make([]byte, 32))
|
||||||
|
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.False(t, nc.CheckSignature(pub))
|
||||||
|
assert.Nil(t, nc.Sign(Curve_P256, rawPriv))
|
||||||
assert.True(t, nc.CheckSignature(pub))
|
assert.True(t, nc.CheckSignature(pub))
|
||||||
|
|
||||||
_, err = nc.Marshal()
|
_, err = nc.Marshal()
|
||||||
@@ -153,7 +198,7 @@ func TestNebulaCertificate_MarshalJSON(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
"{\"details\":{\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}",
|
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}",
|
||||||
string(b),
|
string(b),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -177,7 +222,7 @@ func TestNebulaCertificate_Verify(t *testing.T) {
|
|||||||
|
|
||||||
v, err := c.Verify(time.Now(), caPool)
|
v, err := c.Verify(time.Now(), caPool)
|
||||||
assert.False(t, v)
|
assert.False(t, v)
|
||||||
assert.EqualError(t, err, "certificate has been blocked")
|
assert.EqualError(t, err, "certificate is in the block list")
|
||||||
|
|
||||||
caPool.ResetCertBlocklist()
|
caPool.ResetCertBlocklist()
|
||||||
v, err = c.Verify(time.Now(), caPool)
|
v, err = c.Verify(time.Now(), caPool)
|
||||||
@@ -217,6 +262,65 @@ func TestNebulaCertificate_Verify(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNebulaCertificate_VerifyP256(t *testing.T) {
|
||||||
|
ca, _, caKey, err := newTestCaCertP256(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
h, err := ca.Sha256Sum()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
caPool := NewCAPool()
|
||||||
|
caPool.CAs[h] = ca
|
||||||
|
|
||||||
|
f, err := c.Sha256Sum()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
caPool.BlocklistFingerprint(f)
|
||||||
|
|
||||||
|
v, err := c.Verify(time.Now(), caPool)
|
||||||
|
assert.False(t, v)
|
||||||
|
assert.EqualError(t, err, "certificate is in the block list")
|
||||||
|
|
||||||
|
caPool.ResetCertBlocklist()
|
||||||
|
v, err = c.Verify(time.Now(), caPool)
|
||||||
|
assert.True(t, v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
v, err = c.Verify(time.Now().Add(time.Hour*1000), caPool)
|
||||||
|
assert.False(t, v)
|
||||||
|
assert.EqualError(t, err, "root certificate is expired")
|
||||||
|
|
||||||
|
c, _, _, err = newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
v, err = c.Verify(time.Now().Add(time.Minute*6), caPool)
|
||||||
|
assert.False(t, v)
|
||||||
|
assert.EqualError(t, err, "certificate is expired")
|
||||||
|
|
||||||
|
// Test group assertion
|
||||||
|
ca, _, caKey, err = newTestCaCertP256(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "test2"})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
caPem, err := ca.MarshalToPEM()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
caPool = NewCAPool()
|
||||||
|
caPool.AddCACertificate(caPem)
|
||||||
|
|
||||||
|
c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "bad"})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
v, err = c.Verify(time.Now(), caPool)
|
||||||
|
assert.False(t, v)
|
||||||
|
assert.EqualError(t, err, "certificate contained a group not present on the signing ca: bad")
|
||||||
|
|
||||||
|
c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1"})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
v, err = c.Verify(time.Now(), caPool)
|
||||||
|
assert.True(t, v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestNebulaCertificate_Verify_IPs(t *testing.T) {
|
func TestNebulaCertificate_Verify_IPs(t *testing.T) {
|
||||||
_, caIp1, _ := net.ParseCIDR("10.0.0.0/16")
|
_, caIp1, _ := net.ParseCIDR("10.0.0.0/16")
|
||||||
_, caIp2, _ := net.ParseCIDR("192.168.0.0/24")
|
_, caIp2, _ := net.ParseCIDR("192.168.0.0/24")
|
||||||
@@ -378,20 +482,40 @@ func TestNebulaCertificate_Verify_Subnets(t *testing.T) {
|
|||||||
func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) {
|
func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) {
|
||||||
ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
err = ca.VerifyPrivateKey(caKey)
|
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
_, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
_, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
err = ca.VerifyPrivateKey(caKey2)
|
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
err = c.VerifyPrivateKey(priv)
|
err = c.VerifyPrivateKey(Curve_CURVE25519, priv)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
_, priv2 := x25519Keypair()
|
_, priv2 := x25519Keypair()
|
||||||
err = c.VerifyPrivateKey(priv2)
|
err = c.VerifyPrivateKey(Curve_CURVE25519, priv2)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNebulaCertificate_VerifyPrivateKeyP256(t *testing.T) {
|
||||||
|
ca, _, caKey, err := newTestCaCertP256(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
err = ca.VerifyPrivateKey(Curve_P256, caKey)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
_, _, caKey2, err := newTestCaCertP256(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
err = ca.VerifyPrivateKey(Curve_P256, caKey2)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
|
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
err = c.VerifyPrivateKey(Curve_P256, priv)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
_, priv2 := p256Keypair()
|
||||||
|
err = c.VerifyPrivateKey(Curve_P256, priv2)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -438,6 +562,16 @@ CjkKB2V4cGlyZWQouPmWjQYwufmWjQY6ILCRaoCkJlqHgv5jfDN4lzLHBvDzaQm4
|
|||||||
vZxfu144hmgjQAESQG4qlnZi8DncvD/LDZnLgJHOaX1DWCHHEh59epVsC+BNgTie
|
vZxfu144hmgjQAESQG4qlnZi8DncvD/LDZnLgJHOaX1DWCHHEh59epVsC+BNgTie
|
||||||
WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
|
WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
|
||||||
-----END NEBULA CERTIFICATE-----
|
-----END NEBULA CERTIFICATE-----
|
||||||
|
`
|
||||||
|
|
||||||
|
p256 := `
|
||||||
|
# p256 certificate
|
||||||
|
-----BEGIN NEBULA CERTIFICATE-----
|
||||||
|
CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
|
||||||
|
6tctO9sPT3jOx8ES6M1nIqOhpTmZeabF/4rELDqPV4aH5jfJut798DUXql0FlF8H
|
||||||
|
76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
|
||||||
|
IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
|
||||||
|
-----END NEBULA CERTIFICATE-----
|
||||||
`
|
`
|
||||||
|
|
||||||
rootCA := NebulaCertificate{
|
rootCA := NebulaCertificate{
|
||||||
@@ -452,6 +586,12 @@ WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rootCAP256 := NebulaCertificate{
|
||||||
|
Details: NebulaCertificateDetails{
|
||||||
|
Name: "nebula P256 test",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
p, err := NewCAPoolFromBytes([]byte(noNewLines))
|
p, err := NewCAPoolFromBytes([]byte(noNewLines))
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
|
assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
|
||||||
@@ -474,6 +614,11 @@ WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
|
|||||||
assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
|
assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
|
||||||
assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
|
assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
|
||||||
assert.Equal(t, len(pppp.CAs), 3)
|
assert.Equal(t, len(pppp.CAs), 3)
|
||||||
|
|
||||||
|
ppppp, err := NewCAPoolFromBytes([]byte(p256))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
|
||||||
|
assert.Equal(t, len(ppppp.CAs), 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func appendByteSlices(b ...[]byte) []byte {
|
func appendByteSlices(b ...[]byte) []byte {
|
||||||
@@ -529,11 +674,16 @@ bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
|||||||
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalEd25519PrivateKey(t *testing.T) {
|
func TestUnmarshalSigningPrivateKey(t *testing.T) {
|
||||||
privKey := []byte(`# A good key
|
privKey := []byte(`# A good key
|
||||||
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
-----END NEBULA ED25519 PRIVATE KEY-----
|
-----END NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
privP256Key := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA ECDSA P256 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NEBULA ECDSA P256 PRIVATE KEY-----
|
||||||
`)
|
`)
|
||||||
shortKey := []byte(`# A short key
|
shortKey := []byte(`# A short key
|
||||||
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||||
@@ -550,39 +700,139 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
|||||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
-END NEBULA ED25519 PRIVATE KEY-----`)
|
-END NEBULA ED25519 PRIVATE KEY-----`)
|
||||||
|
|
||||||
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
|
keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
// Success test case
|
// Success test case
|
||||||
k, rest, err := UnmarshalEd25519PrivateKey(keyBundle)
|
k, rest, curve, err := UnmarshalSigningPrivateKey(keyBundle)
|
||||||
assert.Len(t, k, 64)
|
assert.Len(t, k, 64)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Equal(t, Curve_CURVE25519, curve)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
|
||||||
|
assert.Len(t, k, 32)
|
||||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Equal(t, Curve_P256, curve)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
// Fail due to short key
|
// Fail due to short key
|
||||||
k, rest, err = UnmarshalEd25519PrivateKey(rest)
|
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
|
assert.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key")
|
||||||
|
|
||||||
// Fail due to invalid banner
|
// Fail due to invalid banner
|
||||||
k, rest, err = UnmarshalEd25519PrivateKey(rest)
|
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, invalidPem)
|
assert.Equal(t, rest, invalidPem)
|
||||||
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 private key banner")
|
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519/ECDSA private key banner")
|
||||||
|
|
||||||
// Fail due to ivalid PEM format, because
|
// Fail due to ivalid PEM format, because
|
||||||
// it's missing the requisite pre-encapsulation boundary.
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
k, rest, err = UnmarshalEd25519PrivateKey(rest)
|
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, invalidPem)
|
assert.Equal(t, rest, invalidPem)
|
||||||
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalX25519PrivateKey(t *testing.T) {
|
func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
|
||||||
|
passphrase := []byte("DO NOT USE THIS KEY")
|
||||||
|
privKey := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
||||||
|
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
|
||||||
|
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
|
||||||
|
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
|
||||||
|
qrlJ69wer3ZUHFXA
|
||||||
|
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
shortKey := []byte(`# A key which, once decrypted, is too short
|
||||||
|
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
||||||
|
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
|
||||||
|
k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
|
||||||
|
GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
|
||||||
|
rQr3bdH3Oy/WiYU=
|
||||||
|
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
invalidBanner := []byte(`# Invalid banner (not encrypted)
|
||||||
|
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
bWRp2CTVFhW9HD/qCd28ltDgK3w8VXSeaEYczDWos8sMUBqDb9jP3+NYwcS4lURG
|
||||||
|
XgLvodMXZJuaFPssp+WwtA==
|
||||||
|
-----END NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
invalidPem := []byte(`# Not a valid PEM format
|
||||||
|
-BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
||||||
|
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
|
||||||
|
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
|
||||||
|
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
|
||||||
|
qrlJ69wer3ZUHFXA
|
||||||
|
-END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
|
||||||
|
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, keyBundle)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, Curve_CURVE25519, curve)
|
||||||
|
assert.Len(t, k, 64)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
|
||||||
|
// Fail due to short key
|
||||||
|
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
|
||||||
|
assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
|
|
||||||
|
// Fail due to invalid banner
|
||||||
|
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
|
||||||
|
assert.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
|
||||||
|
// Fail due to ivalid PEM format, because
|
||||||
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
|
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
|
||||||
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
|
||||||
|
// Fail due to invalid passphrase
|
||||||
|
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey)
|
||||||
|
assert.EqualError(t, err, "invalid passphrase or corrupt private key")
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, []byte{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) {
|
||||||
|
// Having proved that decryption works correctly above, we can test the
|
||||||
|
// encryption function produces a value which can be decrypted
|
||||||
|
passphrase := []byte("passphrase")
|
||||||
|
bytes := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
|
||||||
|
kdfParams := NewArgon2Parameters(64*1024, 4, 3)
|
||||||
|
key, err := EncryptAndMarshalSigningPrivateKey(Curve_CURVE25519, bytes, passphrase, kdfParams)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// Verify the "key" can be decrypted successfully
|
||||||
|
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, key)
|
||||||
|
assert.Len(t, k, 64)
|
||||||
|
assert.Equal(t, Curve_CURVE25519, curve)
|
||||||
|
assert.Equal(t, rest, []byte{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// EncryptAndMarshalEd25519PrivateKey does not create any errors itself
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalPrivateKey(t *testing.T) {
|
||||||
privKey := []byte(`# A good key
|
privKey := []byte(`# A good key
|
||||||
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
-----END NEBULA X25519 PRIVATE KEY-----
|
-----END NEBULA X25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
privP256Key := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA P256 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NEBULA P256 PRIVATE KEY-----
|
||||||
`)
|
`)
|
||||||
shortKey := []byte(`# A short key
|
shortKey := []byte(`# A short key
|
||||||
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||||
@@ -599,29 +849,37 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
|||||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
-END NEBULA X25519 PRIVATE KEY-----`)
|
-END NEBULA X25519 PRIVATE KEY-----`)
|
||||||
|
|
||||||
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
|
keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
// Success test case
|
// Success test case
|
||||||
k, rest, err := UnmarshalX25519PrivateKey(keyBundle)
|
k, rest, curve, err := UnmarshalPrivateKey(keyBundle)
|
||||||
|
assert.Len(t, k, 32)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Equal(t, Curve_CURVE25519, curve)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, curve, err = UnmarshalPrivateKey(rest)
|
||||||
assert.Len(t, k, 32)
|
assert.Len(t, k, 32)
|
||||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Equal(t, Curve_P256, curve)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
// Fail due to short key
|
// Fail due to short key
|
||||||
k, rest, err = UnmarshalX25519PrivateKey(rest)
|
k, rest, curve, err = UnmarshalPrivateKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 private key")
|
assert.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key")
|
||||||
|
|
||||||
// Fail due to invalid banner
|
// Fail due to invalid banner
|
||||||
k, rest, err = UnmarshalX25519PrivateKey(rest)
|
k, rest, curve, err = UnmarshalPrivateKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, invalidPem)
|
assert.Equal(t, rest, invalidPem)
|
||||||
assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 private key banner")
|
assert.EqualError(t, err, "bytes did not contain a proper nebula private key banner")
|
||||||
|
|
||||||
// Fail due to ivalid PEM format, because
|
// Fail due to ivalid PEM format, because
|
||||||
// it's missing the requisite pre-encapsulation boundary.
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
k, rest, err = UnmarshalX25519PrivateKey(rest)
|
k, rest, curve, err = UnmarshalPrivateKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, invalidPem)
|
assert.Equal(t, rest, invalidPem)
|
||||||
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
@@ -681,6 +939,12 @@ func TestUnmarshalX25519PublicKey(t *testing.T) {
|
|||||||
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
-----END NEBULA X25519 PUBLIC KEY-----
|
-----END NEBULA X25519 PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
pubP256Key := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA P256 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NEBULA P256 PUBLIC KEY-----
|
||||||
`)
|
`)
|
||||||
shortKey := []byte(`# A short key
|
shortKey := []byte(`# A short key
|
||||||
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||||
@@ -697,29 +961,37 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
|||||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
-END NEBULA X25519 PUBLIC KEY-----`)
|
-END NEBULA X25519 PUBLIC KEY-----`)
|
||||||
|
|
||||||
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
|
keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
// Success test case
|
// Success test case
|
||||||
k, rest, err := UnmarshalX25519PublicKey(keyBundle)
|
k, rest, curve, err := UnmarshalPublicKey(keyBundle)
|
||||||
assert.Equal(t, len(k), 32)
|
assert.Equal(t, len(k), 32)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Equal(t, Curve_CURVE25519, curve)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, curve, err = UnmarshalPublicKey(rest)
|
||||||
|
assert.Equal(t, len(k), 65)
|
||||||
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Equal(t, Curve_P256, curve)
|
||||||
|
|
||||||
// Fail due to short key
|
// Fail due to short key
|
||||||
k, rest, err = UnmarshalX25519PublicKey(rest)
|
k, rest, curve, err = UnmarshalPublicKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 public key")
|
assert.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")
|
||||||
|
|
||||||
// Fail due to invalid banner
|
// Fail due to invalid banner
|
||||||
k, rest, err = UnmarshalX25519PublicKey(rest)
|
k, rest, curve, err = UnmarshalPublicKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 public key banner")
|
assert.EqualError(t, err, "bytes did not contain a proper nebula public key banner")
|
||||||
assert.Equal(t, rest, invalidPem)
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
|
||||||
// Fail due to ivalid PEM format, because
|
// Fail due to ivalid PEM format, because
|
||||||
// it's missing the requisite pre-encapsulation boundary.
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
k, rest, err = UnmarshalX25519PublicKey(rest)
|
k, rest, curve, err = UnmarshalPublicKey(rest)
|
||||||
assert.Nil(t, k)
|
assert.Nil(t, k)
|
||||||
assert.Equal(t, rest, invalidPem)
|
assert.Equal(t, rest, invalidPem)
|
||||||
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
@@ -816,13 +1088,56 @@ func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []
|
|||||||
nc.Details.Groups = groups
|
nc.Details.Groups = groups
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(priv)
|
err = nc.Sign(Curve_CURVE25519, priv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
return nc, pub, priv, nil
|
return nc, pub, priv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newTestCaCertP256(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
||||||
|
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||||
|
pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
|
||||||
|
rawPriv := priv.D.FillBytes(make([]byte, 32))
|
||||||
|
|
||||||
|
if before.IsZero() {
|
||||||
|
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
}
|
||||||
|
if after.IsZero() {
|
||||||
|
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
nc := &NebulaCertificate{
|
||||||
|
Details: NebulaCertificateDetails{
|
||||||
|
Name: "test ca",
|
||||||
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
|
PublicKey: pub,
|
||||||
|
IsCA: true,
|
||||||
|
Curve: Curve_P256,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ips) > 0 {
|
||||||
|
nc.Details.Ips = ips
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(subnets) > 0 {
|
||||||
|
nc.Details.Subnets = subnets
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(groups) > 0 {
|
||||||
|
nc.Details.Groups = groups
|
||||||
|
}
|
||||||
|
|
||||||
|
err = nc.Sign(Curve_P256, rawPriv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
|
return nc, pub, rawPriv, nil
|
||||||
|
}
|
||||||
|
|
||||||
func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
||||||
issuer, err := ca.Sha256Sum()
|
issuer, err := ca.Sha256Sum()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -856,7 +1171,16 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, rawPriv := x25519Keypair()
|
var pub, rawPriv []byte
|
||||||
|
|
||||||
|
switch ca.Details.Curve {
|
||||||
|
case Curve_CURVE25519:
|
||||||
|
pub, rawPriv = x25519Keypair()
|
||||||
|
case Curve_P256:
|
||||||
|
pub, rawPriv = p256Keypair()
|
||||||
|
default:
|
||||||
|
return nil, nil, nil, fmt.Errorf("unknown curve: %v", ca.Details.Curve)
|
||||||
|
}
|
||||||
|
|
||||||
nc := &NebulaCertificate{
|
nc := &NebulaCertificate{
|
||||||
Details: NebulaCertificateDetails{
|
Details: NebulaCertificateDetails{
|
||||||
@@ -868,12 +1192,13 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
NotAfter: time.Unix(after.Unix(), 0),
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
PublicKey: pub,
|
PublicKey: pub,
|
||||||
IsCA: false,
|
IsCA: false,
|
||||||
|
Curve: ca.Details.Curve,
|
||||||
Issuer: issuer,
|
Issuer: issuer,
|
||||||
InvertedGroups: make(map[string]struct{}),
|
InvertedGroups: make(map[string]struct{}),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(key)
|
err = nc.Sign(ca.Details.Curve, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -894,3 +1219,12 @@ func x25519Keypair() ([]byte, []byte) {
|
|||||||
|
|
||||||
return pubkey, privkey
|
return pubkey, privkey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func p256Keypair() ([]byte, []byte) {
|
||||||
|
privkey, err := ecdh.P256().GenerateKey(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
pubkey := privkey.PublicKey()
|
||||||
|
return pubkey.Bytes(), privkey.Bytes()
|
||||||
|
}
|
||||||
|
|||||||
140
cert/crypto.go
Normal file
140
cert/crypto.go
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
package cert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/aes"
|
||||||
|
"crypto/cipher"
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/argon2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KDF factors
|
||||||
|
type Argon2Parameters struct {
|
||||||
|
version rune
|
||||||
|
Memory uint32 // KiB
|
||||||
|
Parallelism uint8
|
||||||
|
Iterations uint32
|
||||||
|
salt []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a new Argon2Parameters object with current version set
|
||||||
|
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
|
||||||
|
return &Argon2Parameters{
|
||||||
|
version: argon2.Version,
|
||||||
|
Memory: memory, // KiB
|
||||||
|
Parallelism: parallelism,
|
||||||
|
Iterations: iterations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encrypts data using AES-256-GCM and the Argon2id key derivation function
|
||||||
|
func aes256Encrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
|
||||||
|
key, err := aes256DeriveKey(passphrase, kdfParams)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// this should never happen, but since this dictates how our calls into the
|
||||||
|
// aes package behave and could be catastraphic, let's sanity check this
|
||||||
|
if len(key) != 32 {
|
||||||
|
return nil, fmt.Errorf("invalid AES-256 key length (%d) - cowardly refusing to encrypt", len(key))
|
||||||
|
}
|
||||||
|
|
||||||
|
block, err := aes.NewCipher(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
gcm, err := cipher.NewGCM(block)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
nonce := make([]byte, gcm.NonceSize())
|
||||||
|
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ciphertext := gcm.Seal(nil, nonce, data, nil)
|
||||||
|
blob := joinNonceCiphertext(nonce, ciphertext)
|
||||||
|
|
||||||
|
return blob, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrypts data using AES-256-GCM and the Argon2id key derivation function
|
||||||
|
// Expects the data to include an Argon2id parameter string before the encrypted data
|
||||||
|
func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
|
||||||
|
key, err := aes256DeriveKey(passphrase, kdfParams)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
block, err := aes.NewCipher(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
gcm, err := cipher.NewGCM(block)
|
||||||
|
|
||||||
|
nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid passphrase or corrupt private key")
|
||||||
|
}
|
||||||
|
|
||||||
|
return plaintext, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func aes256DeriveKey(passphrase []byte, params *Argon2Parameters) ([]byte, error) {
|
||||||
|
if params.salt == nil {
|
||||||
|
params.salt = make([]byte, 32)
|
||||||
|
if _, err := rand.Read(params.salt); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// keySize of 32 bytes will result in AES-256 encryption
|
||||||
|
key, err := deriveKey(passphrase, 32, params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Derives a key from a passphrase using Argon2id
|
||||||
|
func deriveKey(passphrase []byte, keySize uint32, params *Argon2Parameters) ([]byte, error) {
|
||||||
|
if params.version != argon2.Version {
|
||||||
|
return nil, fmt.Errorf("incompatible Argon2 version: %d", params.version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if params.salt == nil {
|
||||||
|
return nil, fmt.Errorf("salt must be set in argon2Parameters")
|
||||||
|
} else if len(params.salt) < 16 {
|
||||||
|
return nil, fmt.Errorf("salt must be at least 128 bits")
|
||||||
|
}
|
||||||
|
|
||||||
|
key := argon2.IDKey(passphrase, params.salt, params.Iterations, params.Memory, params.Parallelism, keySize)
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepends nonce to ciphertext
|
||||||
|
func joinNonceCiphertext(nonce []byte, ciphertext []byte) []byte {
|
||||||
|
return append(nonce, ciphertext...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Splits nonce from ciphertext
|
||||||
|
func splitNonceCiphertext(blob []byte, nonceSize int) ([]byte, []byte, error) {
|
||||||
|
if len(blob) <= nonceSize {
|
||||||
|
return nil, nil, fmt.Errorf("invalid ciphertext blob - blob shorter than nonce length")
|
||||||
|
}
|
||||||
|
|
||||||
|
return blob[:nonceSize], blob[nonceSize:], nil
|
||||||
|
}
|
||||||
25
cert/crypto_test.go
Normal file
25
cert/crypto_test.go
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
package cert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"golang.org/x/crypto/argon2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewArgon2Parameters(t *testing.T) {
|
||||||
|
p := NewArgon2Parameters(64*1024, 4, 3)
|
||||||
|
assert.EqualValues(t, &Argon2Parameters{
|
||||||
|
version: argon2.Version,
|
||||||
|
Memory: 64 * 1024,
|
||||||
|
Parallelism: 4,
|
||||||
|
Iterations: 3,
|
||||||
|
}, p)
|
||||||
|
p = NewArgon2Parameters(2*1024*1024, 2, 1)
|
||||||
|
assert.EqualValues(t, &Argon2Parameters{
|
||||||
|
version: argon2.Version,
|
||||||
|
Memory: 2 * 1024 * 1024,
|
||||||
|
Parallelism: 2,
|
||||||
|
Iterations: 1,
|
||||||
|
}, p)
|
||||||
|
}
|
||||||
@@ -1,9 +1,14 @@
|
|||||||
package cert
|
package cert
|
||||||
|
|
||||||
import "errors"
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrExpired = errors.New("certificate is expired")
|
ErrRootExpired = errors.New("root certificate is expired")
|
||||||
ErrNotCA = errors.New("certificate is not a CA")
|
ErrExpired = errors.New("certificate is expired")
|
||||||
ErrNotSelfSigned = errors.New("certificate is not self-signed")
|
ErrNotCA = errors.New("certificate is not a CA")
|
||||||
|
ErrNotSelfSigned = errors.New("certificate is not self-signed")
|
||||||
|
ErrBlockListed = errors.New("certificate is in the block list")
|
||||||
|
ErrSignatureMismatch = errors.New("certificate signature did not match")
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -13,8 +13,14 @@ type Node struct {
|
|||||||
value interface{}
|
value interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type entry struct {
|
||||||
|
CIDR *net.IPNet
|
||||||
|
Value *interface{}
|
||||||
|
}
|
||||||
|
|
||||||
type Tree4 struct {
|
type Tree4 struct {
|
||||||
root *Node
|
root *Node
|
||||||
|
list []entry
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -24,6 +30,7 @@ const (
|
|||||||
func NewTree4() *Tree4 {
|
func NewTree4() *Tree4 {
|
||||||
tree := new(Tree4)
|
tree := new(Tree4)
|
||||||
tree.root = &Node{}
|
tree.root = &Node{}
|
||||||
|
tree.list = []entry{}
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -53,6 +60,15 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
|
|||||||
|
|
||||||
// We already have this range so update the value
|
// We already have this range so update the value
|
||||||
if next != nil {
|
if next != nil {
|
||||||
|
addCIDR := cidr.String()
|
||||||
|
for i, v := range tree.list {
|
||||||
|
if addCIDR == v.CIDR.String() {
|
||||||
|
tree.list = append(tree.list[:i], tree.list[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
|
||||||
node.value = val
|
node.value = val
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -74,9 +90,10 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
|
|||||||
|
|
||||||
// Final node marks our cidr, set the value
|
// Final node marks our cidr, set the value
|
||||||
node.value = val
|
node.value = val
|
||||||
|
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finds the first match, which may be the least specific
|
// Contains finds the first match, which may be the least specific
|
||||||
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
|
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
@@ -99,7 +116,7 @@ func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
|
|||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finds the most specific match
|
// MostSpecificContains finds the most specific match
|
||||||
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
|
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
@@ -121,7 +138,7 @@ func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
|
|||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finds the most specific match
|
// Match finds the most specific match
|
||||||
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
|
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
@@ -143,3 +160,8 @@ func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
|
|||||||
}
|
}
|
||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// List will return all CIDRs and their current values. Do not modify the contents!
|
||||||
|
func (tree *Tree4) List() []entry {
|
||||||
|
return tree.list
|
||||||
|
}
|
||||||
|
|||||||
@@ -8,6 +8,20 @@ import (
|
|||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestCIDRTree_List(t *testing.T) {
|
||||||
|
tree := NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/16"), "1")
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/8"), "2")
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/16"), "3")
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/16"), "4")
|
||||||
|
list := tree.List()
|
||||||
|
assert.Len(t, list, 2)
|
||||||
|
assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
|
||||||
|
assert.Equal(t, "2", *list[0].Value)
|
||||||
|
assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
|
||||||
|
assert.Equal(t, "4", *list[1].Value)
|
||||||
|
}
|
||||||
|
|
||||||
func TestCIDRTree_Contains(t *testing.T) {
|
func TestCIDRTree_Contains(t *testing.T) {
|
||||||
tree := NewTree4()
|
tree := NewTree4()
|
||||||
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
|
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
|
||||||
|
|||||||
@@ -1,11 +1,14 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -17,15 +20,21 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type caFlags struct {
|
type caFlags struct {
|
||||||
set *flag.FlagSet
|
set *flag.FlagSet
|
||||||
name *string
|
name *string
|
||||||
duration *time.Duration
|
duration *time.Duration
|
||||||
outKeyPath *string
|
outKeyPath *string
|
||||||
outCertPath *string
|
outCertPath *string
|
||||||
outQRPath *string
|
outQRPath *string
|
||||||
groups *string
|
groups *string
|
||||||
ips *string
|
ips *string
|
||||||
subnets *string
|
subnets *string
|
||||||
|
argonMemory *uint
|
||||||
|
argonIterations *uint
|
||||||
|
argonParallelism *uint
|
||||||
|
encryption *bool
|
||||||
|
|
||||||
|
curve *string
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCaFlags() *caFlags {
|
func newCaFlags() *caFlags {
|
||||||
@@ -39,10 +48,29 @@ func newCaFlags() *caFlags {
|
|||||||
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
||||||
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses")
|
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses")
|
||||||
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets")
|
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets")
|
||||||
|
cf.argonMemory = cf.set.Uint("argon-memory", 2*1024*1024, "Optional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase")
|
||||||
|
cf.argonParallelism = cf.set.Uint("argon-parallelism", 4, "Optional: Argon2 parallelism parameter used for encrypted private key passphrase")
|
||||||
|
cf.argonIterations = cf.set.Uint("argon-iterations", 1, "Optional: Argon2 iterations parameter used for encrypted private key passphrase")
|
||||||
|
cf.encryption = cf.set.Bool("encrypt", false, "Optional: prompt for passphrase and write out-key in an encrypted format")
|
||||||
|
cf.curve = cf.set.String("curve", "25519", "EdDSA/ECDSA Curve (25519, P256)")
|
||||||
return &cf
|
return &cf
|
||||||
}
|
}
|
||||||
|
|
||||||
func ca(args []string, out io.Writer, errOut io.Writer) error {
|
func parseArgonParameters(memory uint, parallelism uint, iterations uint) (*cert.Argon2Parameters, error) {
|
||||||
|
if memory <= 0 || memory > math.MaxUint32 {
|
||||||
|
return nil, newHelpErrorf("-argon-memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
|
||||||
|
}
|
||||||
|
if parallelism <= 0 || parallelism > math.MaxUint8 {
|
||||||
|
return nil, newHelpErrorf("-argon-parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
|
||||||
|
}
|
||||||
|
if iterations <= 0 || iterations > math.MaxUint32 {
|
||||||
|
return nil, newHelpErrorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cert.NewArgon2Parameters(uint32(memory), uint8(parallelism), uint32(iterations)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
|
||||||
cf := newCaFlags()
|
cf := newCaFlags()
|
||||||
err := cf.set.Parse(args)
|
err := cf.set.Parse(args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -58,6 +86,12 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
|
if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
var kdfParams *cert.Argon2Parameters
|
||||||
|
if *cf.encryption {
|
||||||
|
if kdfParams, err = parseArgonParameters(*cf.argonMemory, *cf.argonParallelism, *cf.argonIterations); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if *cf.duration <= 0 {
|
if *cf.duration <= 0 {
|
||||||
return &helpError{"-duration must be greater than 0"}
|
return &helpError{"-duration must be greater than 0"}
|
||||||
@@ -109,9 +143,47 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, rawPriv, err := ed25519.GenerateKey(rand.Reader)
|
var passphrase []byte
|
||||||
if err != nil {
|
if *cf.encryption {
|
||||||
return fmt.Errorf("error while generating ed25519 keys: %s", err)
|
for i := 0; i < 5; i++ {
|
||||||
|
out.Write([]byte("Enter passphrase: "))
|
||||||
|
passphrase, err = pr.ReadPassword()
|
||||||
|
|
||||||
|
if err == ErrNoTerminal {
|
||||||
|
return fmt.Errorf("out-key must be encrypted interactively")
|
||||||
|
} else if err != nil {
|
||||||
|
return fmt.Errorf("error reading passphrase: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(passphrase) > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(passphrase) == 0 {
|
||||||
|
return fmt.Errorf("no passphrase specified, remove -encrypt flag to write out-key in plaintext")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var curve cert.Curve
|
||||||
|
var pub, rawPriv []byte
|
||||||
|
switch *cf.curve {
|
||||||
|
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||||
|
curve = cert.Curve_CURVE25519
|
||||||
|
pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while generating ed25519 keys: %s", err)
|
||||||
|
}
|
||||||
|
case "P256":
|
||||||
|
var key *ecdsa.PrivateKey
|
||||||
|
curve = cert.Curve_P256
|
||||||
|
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while generating ecdsa keys: %s", err)
|
||||||
|
}
|
||||||
|
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
|
||||||
|
rawPriv = key.D.FillBytes(make([]byte, 32))
|
||||||
|
pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)
|
||||||
}
|
}
|
||||||
|
|
||||||
nc := cert.NebulaCertificate{
|
nc := cert.NebulaCertificate{
|
||||||
@@ -124,6 +196,7 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
NotAfter: time.Now().Add(*cf.duration),
|
NotAfter: time.Now().Add(*cf.duration),
|
||||||
PublicKey: pub,
|
PublicKey: pub,
|
||||||
IsCA: true,
|
IsCA: true,
|
||||||
|
Curve: curve,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -135,12 +208,22 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath)
|
return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(rawPriv)
|
err = nc.Sign(curve, rawPriv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while signing: %s", err)
|
return fmt.Errorf("error while signing: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalEd25519PrivateKey(rawPriv), 0600)
|
if *cf.encryption {
|
||||||
|
b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while encrypting out-key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
|
||||||
|
} else {
|
||||||
|
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600)
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while writing out-key: %s", err)
|
return fmt.Errorf("error while writing out-key: %s", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,8 +5,11 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -26,8 +29,18 @@ func Test_caHelp(t *testing.T) {
|
|||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
"Usage of "+os.Args[0]+" ca <flags>: create a self signed certificate authority\n"+
|
"Usage of "+os.Args[0]+" ca <flags>: create a self signed certificate authority\n"+
|
||||||
|
" -argon-iterations uint\n"+
|
||||||
|
" \tOptional: Argon2 iterations parameter used for encrypted private key passphrase (default 1)\n"+
|
||||||
|
" -argon-memory uint\n"+
|
||||||
|
" \tOptional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase (default 2097152)\n"+
|
||||||
|
" -argon-parallelism uint\n"+
|
||||||
|
" \tOptional: Argon2 parallelism parameter used for encrypted private key passphrase (default 4)\n"+
|
||||||
|
" -curve string\n"+
|
||||||
|
" \tEdDSA/ECDSA Curve (25519, P256) (default \"25519\")\n"+
|
||||||
" -duration duration\n"+
|
" -duration duration\n"+
|
||||||
" \tOptional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\" (default 8760h0m0s)\n"+
|
" \tOptional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\" (default 8760h0m0s)\n"+
|
||||||
|
" -encrypt\n"+
|
||||||
|
" \tOptional: prompt for passphrase and write out-key in an encrypted format\n"+
|
||||||
" -groups string\n"+
|
" -groups string\n"+
|
||||||
" \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
|
" \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
|
||||||
" -ips string\n"+
|
" -ips string\n"+
|
||||||
@@ -50,18 +63,38 @@ func Test_ca(t *testing.T) {
|
|||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
eb := &bytes.Buffer{}
|
eb := &bytes.Buffer{}
|
||||||
|
|
||||||
|
nopw := &StubPasswordReader{
|
||||||
|
password: []byte(""),
|
||||||
|
err: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
errpw := &StubPasswordReader{
|
||||||
|
password: []byte(""),
|
||||||
|
err: errors.New("stub error"),
|
||||||
|
}
|
||||||
|
|
||||||
|
passphrase := []byte("DO NOT USE THIS KEY")
|
||||||
|
testpw := &StubPasswordReader{
|
||||||
|
password: passphrase,
|
||||||
|
err: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
pwPromptOb := "Enter passphrase: "
|
||||||
|
|
||||||
// required args
|
// required args
|
||||||
assertHelpError(t, ca([]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb), "-name is required")
|
assertHelpError(t, ca(
|
||||||
|
[]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
|
||||||
|
), "-name is required")
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
// ipv4 only ips
|
// ipv4 only ips
|
||||||
assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100")
|
assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
// ipv4 only subnets
|
// ipv4 only subnets
|
||||||
assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100")
|
assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
@@ -69,7 +102,7 @@ func Test_ca(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
|
args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
|
||||||
assert.EqualError(t, ca(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
@@ -82,7 +115,7 @@ func Test_ca(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
|
args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
|
||||||
assert.EqualError(t, ca(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
@@ -96,7 +129,7 @@ func Test_ca(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
assert.Nil(t, ca(args, ob, eb))
|
assert.Nil(t, ca(args, ob, eb, nopw))
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
@@ -122,19 +155,67 @@ func Test_ca(t *testing.T) {
|
|||||||
assert.Equal(t, "", lCrt.Details.Issuer)
|
assert.Equal(t, "", lCrt.Details.Issuer)
|
||||||
assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey))
|
assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey))
|
||||||
|
|
||||||
|
// test encrypted key
|
||||||
|
os.Remove(keyF.Name())
|
||||||
|
os.Remove(crtF.Name())
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
|
assert.Nil(t, ca(args, ob, eb, testpw))
|
||||||
|
assert.Equal(t, pwPromptOb, ob.String())
|
||||||
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
|
// read encrypted key file and verify default params
|
||||||
|
rb, _ = ioutil.ReadFile(keyF.Name())
|
||||||
|
k, _ := pem.Decode(rb)
|
||||||
|
ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
// we won't know salt in advance, so just check start of string
|
||||||
|
assert.Equal(t, uint32(2*1024*1024), ned.EncryptionMetadata.Argon2Parameters.Memory)
|
||||||
|
assert.Equal(t, uint8(4), ned.EncryptionMetadata.Argon2Parameters.Parallelism)
|
||||||
|
assert.Equal(t, uint32(1), ned.EncryptionMetadata.Argon2Parameters.Iterations)
|
||||||
|
|
||||||
|
// verify the key is valid and decrypt-able
|
||||||
|
var curve cert.Curve
|
||||||
|
curve, lKey, b, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rb)
|
||||||
|
assert.Equal(t, cert.Curve_CURVE25519, curve)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Len(t, b, 0)
|
||||||
|
assert.Len(t, lKey, 64)
|
||||||
|
|
||||||
|
// test when reading passsword results in an error
|
||||||
|
os.Remove(keyF.Name())
|
||||||
|
os.Remove(crtF.Name())
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
|
assert.Error(t, ca(args, ob, eb, errpw))
|
||||||
|
assert.Equal(t, pwPromptOb, ob.String())
|
||||||
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
|
// test when user fails to enter a password
|
||||||
|
os.Remove(keyF.Name())
|
||||||
|
os.Remove(crtF.Name())
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
|
assert.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
|
||||||
|
assert.Equal(t, strings.Repeat(pwPromptOb, 5), ob.String()) // prompts 5 times before giving up
|
||||||
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
// create valid cert/key for overwrite tests
|
// create valid cert/key for overwrite tests
|
||||||
os.Remove(keyF.Name())
|
os.Remove(keyF.Name())
|
||||||
os.Remove(crtF.Name())
|
os.Remove(crtF.Name())
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
assert.Nil(t, ca(args, ob, eb))
|
assert.Nil(t, ca(args, ob, eb, nopw))
|
||||||
|
|
||||||
// test that we won't overwrite existing certificate file
|
// test that we won't overwrite existing certificate file
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA key: "+keyF.Name())
|
assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|
||||||
@@ -143,7 +224,7 @@ func Test_ca(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||||
assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA cert: "+crtF.Name())
|
assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
os.Remove(keyF.Name())
|
os.Remove(keyF.Name())
|
||||||
|
|||||||
@@ -14,6 +14,8 @@ type keygenFlags struct {
|
|||||||
set *flag.FlagSet
|
set *flag.FlagSet
|
||||||
outKeyPath *string
|
outKeyPath *string
|
||||||
outPubPath *string
|
outPubPath *string
|
||||||
|
|
||||||
|
curve *string
|
||||||
}
|
}
|
||||||
|
|
||||||
func newKeygenFlags() *keygenFlags {
|
func newKeygenFlags() *keygenFlags {
|
||||||
@@ -21,6 +23,7 @@ func newKeygenFlags() *keygenFlags {
|
|||||||
cf.set.Usage = func() {}
|
cf.set.Usage = func() {}
|
||||||
cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to")
|
cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to")
|
||||||
cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to")
|
cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to")
|
||||||
|
cf.curve = cf.set.String("curve", "25519", "ECDH Curve (25519, P256)")
|
||||||
return &cf
|
return &cf
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -38,14 +41,25 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, rawPriv := x25519Keypair()
|
var pub, rawPriv []byte
|
||||||
|
var curve cert.Curve
|
||||||
|
switch *cf.curve {
|
||||||
|
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||||
|
pub, rawPriv = x25519Keypair()
|
||||||
|
curve = cert.Curve_CURVE25519
|
||||||
|
case "P256":
|
||||||
|
pub, rawPriv = p256Keypair()
|
||||||
|
curve = cert.Curve_P256
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid curve: %s", *cf.curve)
|
||||||
|
}
|
||||||
|
|
||||||
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600)
|
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while writing out-key: %s", err)
|
return fmt.Errorf("error while writing out-key: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalX25519PublicKey(pub), 0600)
|
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while writing out-pub: %s", err)
|
return fmt.Errorf("error while writing out-pub: %s", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -22,6 +22,8 @@ func Test_keygenHelp(t *testing.T) {
|
|||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
"Usage of "+os.Args[0]+" keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`\n"+
|
"Usage of "+os.Args[0]+" keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`\n"+
|
||||||
|
" -curve string\n"+
|
||||||
|
" \tECDH Curve (25519, P256) (default \"25519\")\n"+
|
||||||
" -out-key string\n"+
|
" -out-key string\n"+
|
||||||
" \tRequired: path to write the private key to\n"+
|
" \tRequired: path to write the private key to\n"+
|
||||||
" -out-pub string\n"+
|
" -out-pub string\n"+
|
||||||
|
|||||||
@@ -62,11 +62,11 @@ func main() {
|
|||||||
|
|
||||||
switch args[0] {
|
switch args[0] {
|
||||||
case "ca":
|
case "ca":
|
||||||
err = ca(args[1:], os.Stdout, os.Stderr)
|
err = ca(args[1:], os.Stdout, os.Stderr, StdinPasswordReader{})
|
||||||
case "keygen":
|
case "keygen":
|
||||||
err = keygen(args[1:], os.Stdout, os.Stderr)
|
err = keygen(args[1:], os.Stdout, os.Stderr)
|
||||||
case "sign":
|
case "sign":
|
||||||
err = signCert(args[1:], os.Stdout, os.Stderr)
|
err = signCert(args[1:], os.Stdout, os.Stderr, StdinPasswordReader{})
|
||||||
case "print":
|
case "print":
|
||||||
err = printCert(args[1:], os.Stdout, os.Stderr)
|
err = printCert(args[1:], os.Stdout, os.Stderr)
|
||||||
case "verify":
|
case "verify":
|
||||||
@@ -127,6 +127,8 @@ func help(err string, out io.Writer) {
|
|||||||
fmt.Fprintln(out, " "+signSummary())
|
fmt.Fprintln(out, " "+signSummary())
|
||||||
fmt.Fprintln(out, " "+printSummary())
|
fmt.Fprintln(out, " "+printSummary())
|
||||||
fmt.Fprintln(out, " "+verifySummary())
|
fmt.Fprintln(out, " "+verifySummary())
|
||||||
|
fmt.Fprintln(out, "")
|
||||||
|
fmt.Fprintf(out, " To see usage for a given mode, use %s <mode> -h\n", os.Args[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
func mustFlagString(name string, val *string) error {
|
func mustFlagString(name string, val *string) error {
|
||||||
|
|||||||
@@ -22,7 +22,9 @@ func Test_help(t *testing.T) {
|
|||||||
" " + keygenSummary() + "\n" +
|
" " + keygenSummary() + "\n" +
|
||||||
" " + signSummary() + "\n" +
|
" " + signSummary() + "\n" +
|
||||||
" " + printSummary() + "\n" +
|
" " + printSummary() + "\n" +
|
||||||
" " + verifySummary() + "\n"
|
" " + verifySummary() + "\n" +
|
||||||
|
"\n" +
|
||||||
|
" To see usage for a given mode, use " + os.Args[0] + " <mode> -h\n"
|
||||||
|
|
||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
|
|
||||||
|
|||||||
28
cmd/nebula-cert/passwords.go
Normal file
28
cmd/nebula-cert/passwords.go
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/term"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrNoTerminal = errors.New("cannot read password from nonexistent terminal")
|
||||||
|
|
||||||
|
type PasswordReader interface {
|
||||||
|
ReadPassword() ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type StdinPasswordReader struct{}
|
||||||
|
|
||||||
|
func (pr StdinPasswordReader) ReadPassword() ([]byte, error) {
|
||||||
|
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||||
|
return nil, ErrNoTerminal
|
||||||
|
}
|
||||||
|
|
||||||
|
password, err := term.ReadPassword(int(os.Stdin.Fd()))
|
||||||
|
fmt.Println()
|
||||||
|
|
||||||
|
return password, err
|
||||||
|
}
|
||||||
10
cmd/nebula-cert/passwords_test.go
Normal file
10
cmd/nebula-cert/passwords_test.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
type StubPasswordReader struct {
|
||||||
|
password []byte
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pr *StubPasswordReader) ReadPassword() ([]byte, error) {
|
||||||
|
return pr.password, pr.err
|
||||||
|
}
|
||||||
@@ -87,7 +87,7 @@ func Test_printCert(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
|
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
|
||||||
ob.String(),
|
ob.String(),
|
||||||
)
|
)
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
@@ -115,7 +115,7 @@ func Test_printCert(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
"{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
|
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
|
||||||
ob.String(),
|
ob.String(),
|
||||||
)
|
)
|
||||||
assert.Equal(t, "", eb.String())
|
assert.Equal(t, "", eb.String())
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/ecdh"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -49,7 +50,7 @@ func newSignFlags() *signFlags {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
|
||||||
sf := newSignFlags()
|
sf := newSignFlags()
|
||||||
err := sf.set.Parse(args)
|
err := sf.set.Parse(args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -77,8 +78,37 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("error while reading ca-key: %s", err)
|
return fmt.Errorf("error while reading ca-key: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
caKey, _, err := cert.UnmarshalEd25519PrivateKey(rawCAKey)
|
var curve cert.Curve
|
||||||
if err != nil {
|
var caKey []byte
|
||||||
|
|
||||||
|
// naively attempt to decode the private key as though it is not encrypted
|
||||||
|
caKey, _, curve, err = cert.UnmarshalSigningPrivateKey(rawCAKey)
|
||||||
|
if err == cert.ErrPrivateKeyEncrypted {
|
||||||
|
// ask for a passphrase until we get one
|
||||||
|
var passphrase []byte
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
out.Write([]byte("Enter passphrase: "))
|
||||||
|
passphrase, err = pr.ReadPassword()
|
||||||
|
|
||||||
|
if err == ErrNoTerminal {
|
||||||
|
return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
|
||||||
|
} else if err != nil {
|
||||||
|
return fmt.Errorf("error reading password: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(passphrase) > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(passphrase) == 0 {
|
||||||
|
return fmt.Errorf("cannot open encrypted ca-key without passphrase")
|
||||||
|
}
|
||||||
|
|
||||||
|
curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
return fmt.Errorf("error while parsing ca-key: %s", err)
|
return fmt.Errorf("error while parsing ca-key: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -92,7 +122,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("error while parsing ca-crt: %s", err)
|
return fmt.Errorf("error while parsing ca-crt: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := caCert.VerifyPrivateKey(caKey); err != nil {
|
if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
|
||||||
return fmt.Errorf("refusing to sign, root certificate does not match private key")
|
return fmt.Errorf("refusing to sign, root certificate does not match private key")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -152,12 +182,16 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while reading in-pub: %s", err)
|
return fmt.Errorf("error while reading in-pub: %s", err)
|
||||||
}
|
}
|
||||||
pub, _, err = cert.UnmarshalX25519PublicKey(rawPub)
|
var pubCurve cert.Curve
|
||||||
|
pub, _, pubCurve, err = cert.UnmarshalPublicKey(rawPub)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while parsing in-pub: %s", err)
|
return fmt.Errorf("error while parsing in-pub: %s", err)
|
||||||
}
|
}
|
||||||
|
if pubCurve != curve {
|
||||||
|
return fmt.Errorf("curve of in-pub does not match ca")
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
pub, rawPriv = x25519Keypair()
|
pub, rawPriv = newKeypair(curve)
|
||||||
}
|
}
|
||||||
|
|
||||||
nc := cert.NebulaCertificate{
|
nc := cert.NebulaCertificate{
|
||||||
@@ -171,6 +205,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
PublicKey: pub,
|
PublicKey: pub,
|
||||||
IsCA: false,
|
IsCA: false,
|
||||||
Issuer: issuer,
|
Issuer: issuer,
|
||||||
|
Curve: curve,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -190,7 +225,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath)
|
return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(caKey)
|
err = nc.Sign(curve, caKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while signing: %s", err)
|
return fmt.Errorf("error while signing: %s", err)
|
||||||
}
|
}
|
||||||
@@ -200,7 +235,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
|
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600)
|
err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while writing out-key: %s", err)
|
return fmt.Errorf("error while writing out-key: %s", err)
|
||||||
}
|
}
|
||||||
@@ -231,6 +266,17 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newKeypair(curve cert.Curve) ([]byte, []byte) {
|
||||||
|
switch curve {
|
||||||
|
case cert.Curve_CURVE25519:
|
||||||
|
return x25519Keypair()
|
||||||
|
case cert.Curve_P256:
|
||||||
|
return p256Keypair()
|
||||||
|
default:
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func x25519Keypair() ([]byte, []byte) {
|
func x25519Keypair() ([]byte, []byte) {
|
||||||
privkey := make([]byte, 32)
|
privkey := make([]byte, 32)
|
||||||
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||||
@@ -245,6 +291,15 @@ func x25519Keypair() ([]byte, []byte) {
|
|||||||
return pubkey, privkey
|
return pubkey, privkey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func p256Keypair() ([]byte, []byte) {
|
||||||
|
privkey, err := ecdh.P256().GenerateKey(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
pubkey := privkey.PublicKey()
|
||||||
|
return pubkey.Bytes(), privkey.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
func signSummary() string {
|
func signSummary() string {
|
||||||
return "sign <flags>: create and sign a certificate"
|
return "sign <flags>: create and sign a certificate"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"errors"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -58,17 +59,39 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
eb := &bytes.Buffer{}
|
eb := &bytes.Buffer{}
|
||||||
|
|
||||||
|
nopw := &StubPasswordReader{
|
||||||
|
password: []byte(""),
|
||||||
|
err: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
errpw := &StubPasswordReader{
|
||||||
|
password: []byte(""),
|
||||||
|
err: errors.New("stub error"),
|
||||||
|
}
|
||||||
|
|
||||||
|
passphrase := []byte("DO NOT USE THIS KEY")
|
||||||
|
testpw := &StubPasswordReader{
|
||||||
|
password: passphrase,
|
||||||
|
err: nil,
|
||||||
|
}
|
||||||
|
|
||||||
// required args
|
// required args
|
||||||
assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-name is required")
|
assertHelpError(t, signCert(
|
||||||
|
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
|
||||||
|
), "-name is required")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-ip is required")
|
assertHelpError(t, signCert(
|
||||||
|
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
|
||||||
|
), "-ip is required")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
// cannot set -in-pub and -out-key
|
// cannot set -in-pub and -out-key
|
||||||
assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb), "cannot set both -in-pub and -out-key")
|
assertHelpError(t, signCert(
|
||||||
|
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
|
||||||
|
), "cannot set both -in-pub and -out-key")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -76,7 +99,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-key: open ./nope: "+NoSuchFileError)
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError)
|
||||||
|
|
||||||
// failed to unmarshal key
|
// failed to unmarshal key
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
@@ -86,7 +109,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
defer os.Remove(caKeyF.Name())
|
defer os.Remove(caKeyF.Name())
|
||||||
|
|
||||||
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-key: input did not contain a valid PEM encoded block")
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -98,7 +121,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
|
|
||||||
// failed to read cert
|
// failed to read cert
|
||||||
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -110,7 +133,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
defer os.Remove(caCrtF.Name())
|
defer os.Remove(caCrtF.Name())
|
||||||
|
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -129,7 +152,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
|
|
||||||
// failed to read pub
|
// failed to read pub
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while reading in-pub: open ./nope: "+NoSuchFileError)
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError)
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -141,7 +164,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
defer os.Remove(inPubF.Name())
|
defer os.Remove(inPubF.Name())
|
||||||
|
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while parsing in-pub: input did not contain a valid PEM encoded block")
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -155,14 +178,14 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||||
assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
|
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||||
assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100")
|
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -170,14 +193,14 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||||
assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: invalid CIDR address: a")
|
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: invalid CIDR address: a")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
|
||||||
assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100")
|
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -191,7 +214,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate does not match private key")
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -199,7 +222,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -212,7 +235,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
os.Remove(keyF.Name())
|
os.Remove(keyF.Name())
|
||||||
@@ -226,7 +249,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
assert.Nil(t, signCert(args, ob, eb))
|
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -268,7 +291,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
|
||||||
assert.Nil(t, signCert(args, ob, eb))
|
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -283,7 +306,7 @@ func Test_signCert(t *testing.T) {
|
|||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -291,14 +314,14 @@ func Test_signCert(t *testing.T) {
|
|||||||
os.Remove(keyF.Name())
|
os.Remove(keyF.Name())
|
||||||
os.Remove(crtF.Name())
|
os.Remove(crtF.Name())
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
assert.Nil(t, signCert(args, ob, eb))
|
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||||
|
|
||||||
// test that we won't overwrite existing key file
|
// test that we won't overwrite existing key file
|
||||||
os.Remove(crtF.Name())
|
os.Remove(crtF.Name())
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing key: "+keyF.Name())
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name())
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
@@ -306,14 +329,83 @@ func Test_signCert(t *testing.T) {
|
|||||||
os.Remove(keyF.Name())
|
os.Remove(keyF.Name())
|
||||||
os.Remove(crtF.Name())
|
os.Remove(crtF.Name())
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
assert.Nil(t, signCert(args, ob, eb))
|
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||||
|
|
||||||
// test that we won't overwrite existing certificate file
|
// test that we won't overwrite existing certificate file
|
||||||
os.Remove(keyF.Name())
|
os.Remove(keyF.Name())
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name())
|
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name())
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
|
// create valid cert/key using encrypted CA key
|
||||||
|
os.Remove(caKeyF.Name())
|
||||||
|
os.Remove(caCrtF.Name())
|
||||||
|
os.Remove(keyF.Name())
|
||||||
|
os.Remove(crtF.Name())
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
|
||||||
|
caKeyF, err = ioutil.TempFile("", "sign-cert.key")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(caKeyF.Name())
|
||||||
|
|
||||||
|
caCrtF, err = ioutil.TempFile("", "sign-cert.crt")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(caCrtF.Name())
|
||||||
|
|
||||||
|
// generate the encrypted key
|
||||||
|
caPub, caPriv, _ = ed25519.GenerateKey(rand.Reader)
|
||||||
|
kdfParams := cert.NewArgon2Parameters(64*1024, 4, 3)
|
||||||
|
b, _ = cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, caPriv, passphrase, kdfParams)
|
||||||
|
caKeyF.Write(b)
|
||||||
|
|
||||||
|
ca = cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "ca",
|
||||||
|
NotBefore: time.Now(),
|
||||||
|
NotAfter: time.Now().Add(time.Minute * 200),
|
||||||
|
PublicKey: caPub,
|
||||||
|
IsCA: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
b, _ = ca.MarshalToPEM()
|
||||||
|
caCrtF.Write(b)
|
||||||
|
|
||||||
|
// test with the proper password
|
||||||
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
|
assert.Nil(t, signCert(args, ob, eb, testpw))
|
||||||
|
assert.Equal(t, "Enter passphrase: ", ob.String())
|
||||||
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
|
// test with the wrong password
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
|
||||||
|
testpw.password = []byte("invalid password")
|
||||||
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
|
assert.Error(t, signCert(args, ob, eb, testpw))
|
||||||
|
assert.Equal(t, "Enter passphrase: ", ob.String())
|
||||||
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
|
// test with the user not entering a password
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
|
||||||
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
|
assert.Error(t, signCert(args, ob, eb, nopw))
|
||||||
|
// normally the user hitting enter on the prompt would add newlines between these
|
||||||
|
assert.Equal(t, "Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: ", ob.String())
|
||||||
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
|
// test an error condition
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
|
||||||
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||||
|
assert.Error(t, signCert(args, ob, eb, errpw))
|
||||||
|
assert.Equal(t, "Enter passphrase: ", ob.String())
|
||||||
|
assert.Empty(t, eb.String())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -77,7 +77,7 @@ func Test_verify(t *testing.T) {
|
|||||||
IsCA: true,
|
IsCA: true,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
ca.Sign(caPriv)
|
ca.Sign(cert.Curve_CURVE25519, caPriv)
|
||||||
b, _ := ca.MarshalToPEM()
|
b, _ := ca.MarshalToPEM()
|
||||||
caFile.Truncate(0)
|
caFile.Truncate(0)
|
||||||
caFile.Seek(0, 0)
|
caFile.Seek(0, 0)
|
||||||
@@ -117,7 +117,7 @@ func Test_verify(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
crt.Sign(badPriv)
|
crt.Sign(cert.Curve_CURVE25519, badPriv)
|
||||||
b, _ = crt.MarshalToPEM()
|
b, _ = crt.MarshalToPEM()
|
||||||
certFile.Truncate(0)
|
certFile.Truncate(0)
|
||||||
certFile.Seek(0, 0)
|
certFile.Seek(0, 0)
|
||||||
@@ -129,7 +129,7 @@ func Test_verify(t *testing.T) {
|
|||||||
assert.EqualError(t, err, "certificate signature did not match")
|
assert.EqualError(t, err, "certificate signature did not match")
|
||||||
|
|
||||||
// verified cert at path
|
// verified cert at path
|
||||||
crt.Sign(caPriv)
|
crt.Sign(cert.Curve_CURVE25519, caPriv)
|
||||||
b, _ = crt.MarshalToPEM()
|
b, _ = crt.MarshalToPEM()
|
||||||
certFile.Truncate(0)
|
certFile.Truncate(0)
|
||||||
certFile.Seek(0, 0)
|
certFile.Seek(0, 0)
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
|
|
||||||
// A version string that can be set with
|
// A version string that can be set with
|
||||||
//
|
//
|
||||||
// -ldflags "-X main.Build=SOMEVERSION"
|
// -ldflags "-X main.Build=SOMEVERSION"
|
||||||
//
|
//
|
||||||
// at compile-time.
|
// at compile-time.
|
||||||
var Build string
|
var Build string
|
||||||
|
|||||||
@@ -49,6 +49,14 @@ func (p *program) Stop(s service.Service) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func fileExists(filename string) bool {
|
||||||
|
_, err := os.Stat(filename)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
|
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
|
||||||
if *configPath == "" {
|
if *configPath == "" {
|
||||||
ex, err := os.Executable()
|
ex, err := os.Executable()
|
||||||
@@ -56,6 +64,9 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
*configPath = filepath.Dir(ex) + "/config.yaml"
|
*configPath = filepath.Dir(ex) + "/config.yaml"
|
||||||
|
if !fileExists(*configPath) {
|
||||||
|
*configPath = filepath.Dir(ex) + "/config.yml"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
svcConfig := &service.Config{
|
svcConfig := &service.Config{
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
|
|
||||||
// A version string that can be set with
|
// A version string that can be set with
|
||||||
//
|
//
|
||||||
// -ldflags "-X main.Build=SOMEVERSION"
|
// -ldflags "-X main.Build=SOMEVERSION"
|
||||||
//
|
//
|
||||||
// at compile-time.
|
// at compile-time.
|
||||||
var Build string
|
var Build string
|
||||||
|
|||||||
@@ -7,8 +7,11 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/imdario/mergo"
|
||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestConfig_Load(t *testing.T) {
|
func TestConfig_Load(t *testing.T) {
|
||||||
@@ -147,3 +150,77 @@ func TestConfig_ReloadConfig(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ensure mergo merges are done the way we expect.
|
||||||
|
// This is needed to test for potential regressions, like:
|
||||||
|
// - https://github.com/imdario/mergo/issues/187
|
||||||
|
func TestConfig_MergoMerge(t *testing.T) {
|
||||||
|
configs := [][]byte{
|
||||||
|
[]byte(`
|
||||||
|
listen:
|
||||||
|
port: 1234
|
||||||
|
`),
|
||||||
|
[]byte(`
|
||||||
|
firewall:
|
||||||
|
inbound:
|
||||||
|
- port: 443
|
||||||
|
proto: tcp
|
||||||
|
groups:
|
||||||
|
- server
|
||||||
|
- port: 443
|
||||||
|
proto: tcp
|
||||||
|
groups:
|
||||||
|
- webapp
|
||||||
|
`),
|
||||||
|
[]byte(`
|
||||||
|
listen:
|
||||||
|
host: 0.0.0.0
|
||||||
|
port: 4242
|
||||||
|
firewall:
|
||||||
|
outbound:
|
||||||
|
- port: any
|
||||||
|
proto: any
|
||||||
|
host: any
|
||||||
|
inbound:
|
||||||
|
- port: any
|
||||||
|
proto: icmp
|
||||||
|
host: any
|
||||||
|
`),
|
||||||
|
}
|
||||||
|
|
||||||
|
var m map[any]any
|
||||||
|
|
||||||
|
// merge the same way config.parse() merges
|
||||||
|
for _, b := range configs {
|
||||||
|
var nm map[any]any
|
||||||
|
err := yaml.Unmarshal(b, &nm)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// We need to use WithAppendSlice so that firewall rules in separate
|
||||||
|
// files are appended together
|
||||||
|
err = mergo.Merge(&nm, m, mergo.WithAppendSlice)
|
||||||
|
m = nm
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Merged Config: %#v", m)
|
||||||
|
mYaml, err := yaml.Marshal(m)
|
||||||
|
require.NoError(t, err)
|
||||||
|
t.Logf("Merged Config as YAML:\n%s", mYaml)
|
||||||
|
|
||||||
|
// If a bug is present, some items might be replaced instead of merged like we expect
|
||||||
|
expected := map[any]any{
|
||||||
|
"firewall": map[any]any{
|
||||||
|
"inbound": []any{
|
||||||
|
map[any]any{"host": "any", "port": "any", "proto": "icmp"},
|
||||||
|
map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
|
||||||
|
map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
|
||||||
|
"outbound": []any{
|
||||||
|
map[any]any{"host": "any", "port": "any", "proto": "any"}}},
|
||||||
|
"listen": map[any]any{
|
||||||
|
"host": "0.0.0.0",
|
||||||
|
"port": 4242,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.Equal(t, expected, m)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,150 +1,154 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
|
type trafficDecision int
|
||||||
// and something like every 10 packets we could lock, send 10, then unlock for a moment
|
|
||||||
|
const (
|
||||||
|
doNothing trafficDecision = 0
|
||||||
|
deleteTunnel trafficDecision = 1 // delete the hostinfo on our side, do not notify the remote
|
||||||
|
closeTunnel trafficDecision = 2 // delete the hostinfo and notify the remote
|
||||||
|
swapPrimary trafficDecision = 3
|
||||||
|
migrateRelays trafficDecision = 4
|
||||||
|
tryRehandshake trafficDecision = 5
|
||||||
|
)
|
||||||
|
|
||||||
type connectionManager struct {
|
type connectionManager struct {
|
||||||
hostMap *HostMap
|
in map[uint32]struct{}
|
||||||
in map[iputil.VpnIp]struct{}
|
inLock *sync.RWMutex
|
||||||
inLock *sync.RWMutex
|
|
||||||
inCount int
|
|
||||||
out map[iputil.VpnIp]struct{}
|
|
||||||
outLock *sync.RWMutex
|
|
||||||
outCount int
|
|
||||||
TrafficTimer *SystemTimerWheel
|
|
||||||
intf *Interface
|
|
||||||
|
|
||||||
pendingDeletion map[iputil.VpnIp]int
|
out map[uint32]struct{}
|
||||||
pendingDeletionLock *sync.RWMutex
|
outLock *sync.RWMutex
|
||||||
pendingDeletionTimer *SystemTimerWheel
|
|
||||||
|
|
||||||
checkInterval int
|
// relayUsed holds which relay localIndexs are in use
|
||||||
pendingDeletionInterval int
|
relayUsed map[uint32]struct{}
|
||||||
|
relayUsedLock *sync.RWMutex
|
||||||
|
|
||||||
|
hostMap *HostMap
|
||||||
|
trafficTimer *LockingTimerWheel[uint32]
|
||||||
|
intf *Interface
|
||||||
|
pendingDeletion map[uint32]struct{}
|
||||||
|
punchy *Punchy
|
||||||
|
checkInterval time.Duration
|
||||||
|
pendingDeletionInterval time.Duration
|
||||||
|
metricsTxPunchy metrics.Counter
|
||||||
|
|
||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
// I wanted to call one matLock
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
|
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
|
||||||
|
var max time.Duration
|
||||||
|
if checkInterval < pendingDeletionInterval {
|
||||||
|
max = pendingDeletionInterval
|
||||||
|
} else {
|
||||||
|
max = checkInterval
|
||||||
|
}
|
||||||
|
|
||||||
nc := &connectionManager{
|
nc := &connectionManager{
|
||||||
hostMap: intf.hostMap,
|
hostMap: intf.hostMap,
|
||||||
in: make(map[iputil.VpnIp]struct{}),
|
in: make(map[uint32]struct{}),
|
||||||
inLock: &sync.RWMutex{},
|
inLock: &sync.RWMutex{},
|
||||||
inCount: 0,
|
out: make(map[uint32]struct{}),
|
||||||
out: make(map[iputil.VpnIp]struct{}),
|
|
||||||
outLock: &sync.RWMutex{},
|
outLock: &sync.RWMutex{},
|
||||||
outCount: 0,
|
relayUsed: make(map[uint32]struct{}),
|
||||||
TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
relayUsedLock: &sync.RWMutex{},
|
||||||
|
trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
|
||||||
intf: intf,
|
intf: intf,
|
||||||
pendingDeletion: make(map[iputil.VpnIp]int),
|
pendingDeletion: make(map[uint32]struct{}),
|
||||||
pendingDeletionLock: &sync.RWMutex{},
|
|
||||||
pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
|
||||||
checkInterval: checkInterval,
|
checkInterval: checkInterval,
|
||||||
pendingDeletionInterval: pendingDeletionInterval,
|
pendingDeletionInterval: pendingDeletionInterval,
|
||||||
|
punchy: punchy,
|
||||||
|
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
|
||||||
l: l,
|
l: l,
|
||||||
}
|
}
|
||||||
|
|
||||||
nc.Start(ctx)
|
nc.Start(ctx)
|
||||||
return nc
|
return nc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) In(ip iputil.VpnIp) {
|
func (n *connectionManager) In(localIndex uint32) {
|
||||||
n.inLock.RLock()
|
n.inLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.in[ip]; ok {
|
if _, ok := n.in[localIndex]; ok {
|
||||||
n.inLock.RUnlock()
|
n.inLock.RUnlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.inLock.RUnlock()
|
n.inLock.RUnlock()
|
||||||
n.inLock.Lock()
|
n.inLock.Lock()
|
||||||
n.in[ip] = struct{}{}
|
n.in[localIndex] = struct{}{}
|
||||||
n.inLock.Unlock()
|
n.inLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Out(ip iputil.VpnIp) {
|
func (n *connectionManager) Out(localIndex uint32) {
|
||||||
n.outLock.RLock()
|
n.outLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.out[ip]; ok {
|
if _, ok := n.out[localIndex]; ok {
|
||||||
n.outLock.RUnlock()
|
n.outLock.RUnlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.outLock.RUnlock()
|
n.outLock.RUnlock()
|
||||||
n.outLock.Lock()
|
n.outLock.Lock()
|
||||||
// double check since we dropped the lock temporarily
|
n.out[localIndex] = struct{}{}
|
||||||
if _, ok := n.out[ip]; ok {
|
n.outLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) RelayUsed(localIndex uint32) {
|
||||||
|
n.relayUsedLock.RLock()
|
||||||
|
// If this already exists, return
|
||||||
|
if _, ok := n.relayUsed[localIndex]; ok {
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
n.relayUsedLock.Lock()
|
||||||
|
n.relayUsed[localIndex] = struct{}{}
|
||||||
|
n.relayUsedLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
|
||||||
|
// resets the state for this local index
|
||||||
|
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
|
||||||
|
n.inLock.Lock()
|
||||||
|
n.outLock.Lock()
|
||||||
|
_, in := n.in[localIndex]
|
||||||
|
_, out := n.out[localIndex]
|
||||||
|
delete(n.in, localIndex)
|
||||||
|
delete(n.out, localIndex)
|
||||||
|
n.inLock.Unlock()
|
||||||
|
n.outLock.Unlock()
|
||||||
|
return in, out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
|
||||||
|
// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
|
||||||
|
n.outLock.Lock()
|
||||||
|
if _, ok := n.out[localIndex]; ok {
|
||||||
n.outLock.Unlock()
|
n.outLock.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.out[ip] = struct{}{}
|
n.out[localIndex] = struct{}{}
|
||||||
n.AddTrafficWatch(ip, n.checkInterval)
|
n.trafficTimer.Add(localIndex, n.checkInterval)
|
||||||
n.outLock.Unlock()
|
n.outLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool {
|
|
||||||
n.inLock.RLock()
|
|
||||||
if _, ok := n.in[vpnIp]; ok {
|
|
||||||
n.inLock.RUnlock()
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
n.inLock.RUnlock()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) ClearIP(ip iputil.VpnIp) {
|
|
||||||
n.inLock.Lock()
|
|
||||||
n.outLock.Lock()
|
|
||||||
delete(n.in, ip)
|
|
||||||
delete(n.out, ip)
|
|
||||||
n.inLock.Unlock()
|
|
||||||
n.outLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) {
|
|
||||||
n.pendingDeletionLock.Lock()
|
|
||||||
delete(n.pendingDeletion, ip)
|
|
||||||
n.pendingDeletionLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) AddPendingDeletion(ip iputil.VpnIp) {
|
|
||||||
n.pendingDeletionLock.Lock()
|
|
||||||
if _, ok := n.pendingDeletion[ip]; ok {
|
|
||||||
n.pendingDeletion[ip] += 1
|
|
||||||
} else {
|
|
||||||
n.pendingDeletion[ip] = 0
|
|
||||||
}
|
|
||||||
n.pendingDeletionTimer.Add(ip, time.Second*time.Duration(n.pendingDeletionInterval))
|
|
||||||
n.pendingDeletionLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool {
|
|
||||||
n.pendingDeletionLock.RLock()
|
|
||||||
if _, ok := n.pendingDeletion[ip]; ok {
|
|
||||||
|
|
||||||
n.pendingDeletionLock.RUnlock()
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
n.pendingDeletionLock.RUnlock()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) {
|
|
||||||
n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) Start(ctx context.Context) {
|
func (n *connectionManager) Start(ctx context.Context) {
|
||||||
go n.Run(ctx)
|
go n.Run(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Run(ctx context.Context) {
|
func (n *connectionManager) Run(ctx context.Context) {
|
||||||
|
//TODO: this tick should be based on the min wheel tick? Check firewall
|
||||||
clockSource := time.NewTicker(500 * time.Millisecond)
|
clockSource := time.NewTicker(500 * time.Millisecond)
|
||||||
defer clockSource.Stop()
|
defer clockSource.Stop()
|
||||||
|
|
||||||
@@ -156,154 +160,331 @@ func (n *connectionManager) Run(ctx context.Context) {
|
|||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
|
|
||||||
case now := <-clockSource.C:
|
case now := <-clockSource.C:
|
||||||
n.HandleMonitorTick(now, p, nb, out)
|
n.trafficTimer.Advance(now)
|
||||||
n.HandleDeletionTick(now)
|
for {
|
||||||
|
localIndex, has := n.trafficTimer.Purge()
|
||||||
|
if !has {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
n.doTrafficCheck(localIndex, p, nb, out, now)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) {
|
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
||||||
n.TrafficTimer.advance(now)
|
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now)
|
||||||
for {
|
|
||||||
ep := n.TrafficTimer.Purge()
|
switch decision {
|
||||||
if ep == nil {
|
case deleteTunnel:
|
||||||
break
|
if n.hostMap.DeleteHostInfo(hostinfo) {
|
||||||
|
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
|
||||||
|
n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ep.(iputil.VpnIp)
|
case closeTunnel:
|
||||||
|
n.intf.sendCloseTunnel(hostinfo)
|
||||||
|
n.intf.closeTunnel(hostinfo)
|
||||||
|
|
||||||
// Check for traffic coming back in from this host.
|
case swapPrimary:
|
||||||
traf := n.CheckIn(vpnIp)
|
n.swapPrimary(hostinfo, primary)
|
||||||
|
|
||||||
hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
|
case migrateRelays:
|
||||||
if err != nil {
|
n.migrateRelayUsed(hostinfo, primary)
|
||||||
n.l.Debugf("Not found in hostmap: %s", vpnIp)
|
|
||||||
n.ClearIP(vpnIp)
|
case tryRehandshake:
|
||||||
n.ClearPendingDeletion(vpnIp)
|
n.tryRehandshake(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
n.resetRelayTrafficCheck(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
|
||||||
|
if hostinfo != nil {
|
||||||
|
n.relayUsedLock.Lock()
|
||||||
|
defer n.relayUsedLock.Unlock()
|
||||||
|
// No need to migrate any relays, delete usage info now.
|
||||||
|
for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
|
||||||
|
delete(n.relayUsed, idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
|
||||||
|
relayFor := oldhostinfo.relayState.CopyAllRelayFor()
|
||||||
|
|
||||||
|
for _, r := range relayFor {
|
||||||
|
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
|
||||||
|
|
||||||
|
var index uint32
|
||||||
|
var relayFrom iputil.VpnIp
|
||||||
|
var relayTo iputil.VpnIp
|
||||||
|
switch {
|
||||||
|
case ok && existing.State == Established:
|
||||||
|
// This relay already exists in newhostinfo, then do nothing.
|
||||||
continue
|
continue
|
||||||
}
|
case ok && existing.State == Requested:
|
||||||
|
// The relay exists in a Requested state; re-send the request
|
||||||
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
|
index = existing.LocalIndex
|
||||||
continue
|
switch r.Type {
|
||||||
}
|
case TerminalType:
|
||||||
|
relayFrom = newhostinfo.vpnIp
|
||||||
// If we saw an incoming packets from this ip and peer's certificate is not
|
relayTo = existing.PeerIp
|
||||||
// expired, just ignore.
|
case ForwardingType:
|
||||||
if traf {
|
relayFrom = existing.PeerIp
|
||||||
if n.l.Level >= logrus.DebugLevel {
|
relayTo = newhostinfo.vpnIp
|
||||||
n.l.WithField("vpnIp", vpnIp).
|
default:
|
||||||
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
// should never happen
|
||||||
Debug("Tunnel status")
|
}
|
||||||
|
case !ok:
|
||||||
|
n.relayUsedLock.RLock()
|
||||||
|
if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
|
||||||
|
// The relay hasn't been used; don't migrate it.
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
n.relayUsedLock.RUnlock()
|
||||||
|
// The relay doesn't exist at all; create some relay state and send the request.
|
||||||
|
var err error
|
||||||
|
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
|
||||||
|
if err != nil {
|
||||||
|
n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch r.Type {
|
||||||
|
case TerminalType:
|
||||||
|
relayFrom = newhostinfo.vpnIp
|
||||||
|
relayTo = r.PeerIp
|
||||||
|
case ForwardingType:
|
||||||
|
relayFrom = r.PeerIp
|
||||||
|
relayTo = newhostinfo.vpnIp
|
||||||
|
default:
|
||||||
|
// should never happen
|
||||||
}
|
}
|
||||||
n.ClearIP(vpnIp)
|
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.logger(n.l).
|
// Send a CreateRelayRequest to the peer.
|
||||||
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
req := NebulaControl{
|
||||||
Debug("Tunnel status")
|
Type: NebulaControl_CreateRelayRequest,
|
||||||
|
InitiatorRelayIndex: index,
|
||||||
if hostinfo != nil && hostinfo.ConnectionState != nil {
|
RelayFromIp: uint32(relayFrom),
|
||||||
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
RelayToIp: uint32(relayTo),
|
||||||
n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out)
|
}
|
||||||
|
msg, err := req.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
|
||||||
} else {
|
} else {
|
||||||
hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp)
|
n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||||
|
n.l.WithFields(logrus.Fields{
|
||||||
|
"relayFrom": iputil.VpnIp(req.RelayFromIp),
|
||||||
|
"relayTo": iputil.VpnIp(req.RelayToIp),
|
||||||
|
"initiatorRelayIndex": req.InitiatorRelayIndex,
|
||||||
|
"responderRelayIndex": req.ResponderRelayIndex,
|
||||||
|
"vpnIp": newhostinfo.vpnIp}).
|
||||||
|
Info("send CreateRelayRequest")
|
||||||
}
|
}
|
||||||
n.AddPendingDeletion(vpnIp)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) HandleDeletionTick(now time.Time) {
|
func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
|
||||||
n.pendingDeletionTimer.advance(now)
|
n.hostMap.RLock()
|
||||||
for {
|
defer n.hostMap.RUnlock()
|
||||||
ep := n.pendingDeletionTimer.Purge()
|
|
||||||
if ep == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
vpnIp := ep.(iputil.VpnIp)
|
hostinfo := n.hostMap.Indexes[localIndex]
|
||||||
|
if hostinfo == nil {
|
||||||
|
n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
|
||||||
|
delete(n.pendingDeletion, localIndex)
|
||||||
|
return doNothing, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
|
if n.isInvalidCertificate(now, hostinfo) {
|
||||||
if err != nil {
|
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||||
n.l.Debugf("Not found in hostmap: %s", vpnIp)
|
return closeTunnel, hostinfo, nil
|
||||||
n.ClearIP(vpnIp)
|
}
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
|
primary := n.hostMap.Hosts[hostinfo.vpnIp]
|
||||||
continue
|
mainHostInfo := true
|
||||||
}
|
if primary != nil && primary != hostinfo {
|
||||||
|
mainHostInfo = false
|
||||||
|
}
|
||||||
|
|
||||||
// If we saw an incoming packets from this ip and peer's certificate is not
|
// Check for traffic on this hostinfo
|
||||||
// expired, just ignore.
|
inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
|
||||||
traf := n.CheckIn(vpnIp)
|
|
||||||
if traf {
|
|
||||||
n.l.WithField("vpnIp", vpnIp).
|
|
||||||
WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
|
|
||||||
Debug("Tunnel status")
|
|
||||||
|
|
||||||
n.ClearIP(vpnIp)
|
// A hostinfo is determined alive if there is incoming traffic
|
||||||
n.ClearPendingDeletion(vpnIp)
|
if inTraffic {
|
||||||
continue
|
decision := doNothing
|
||||||
}
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
|
|
||||||
// If it comes around on deletion wheel and hasn't resolved itself, delete
|
|
||||||
if n.checkPendingDeletion(vpnIp) {
|
|
||||||
cn := ""
|
|
||||||
if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
|
|
||||||
cn = hostinfo.ConnectionState.peerCert.Details.Name
|
|
||||||
}
|
|
||||||
hostinfo.logger(n.l).
|
hostinfo.logger(n.l).
|
||||||
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
||||||
WithField("certName", cn).
|
Debug("Tunnel status")
|
||||||
Info("Tunnel status")
|
}
|
||||||
|
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
|
||||||
|
if mainHostInfo {
|
||||||
|
decision = tryRehandshake
|
||||||
|
|
||||||
n.ClearIP(vpnIp)
|
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
// TODO: This is only here to let tests work. Should do proper mocking
|
|
||||||
if n.intf.lightHouse != nil {
|
|
||||||
n.intf.lightHouse.DeleteVpnIp(vpnIp)
|
|
||||||
}
|
|
||||||
n.hostMap.DeleteHostInfo(hostinfo)
|
|
||||||
} else {
|
} else {
|
||||||
n.ClearIP(vpnIp)
|
if n.shouldSwapPrimary(hostinfo, primary) {
|
||||||
n.ClearPendingDeletion(vpnIp)
|
decision = swapPrimary
|
||||||
|
} else {
|
||||||
|
// migrate the relays to the primary, if in use.
|
||||||
|
decision = migrateRelays
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
|
||||||
|
|
||||||
|
if !outTraffic {
|
||||||
|
// Send a punch packet to keep the NAT state alive
|
||||||
|
n.sendPunch(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
return decision, hostinfo, primary
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
|
||||||
|
// We have already sent a test packet and nothing was returned, this hostinfo is dead
|
||||||
|
hostinfo.logger(n.l).
|
||||||
|
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
||||||
|
Info("Tunnel status")
|
||||||
|
|
||||||
|
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
return deleteTunnel, hostinfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
|
||||||
|
if !outTraffic {
|
||||||
|
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
|
||||||
|
// Just maintain NAT state if configured to do so.
|
||||||
|
n.sendPunch(hostinfo)
|
||||||
|
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
|
||||||
|
return doNothing, nil, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.punchy.GetTargetEverything() {
|
||||||
|
// This is similar to the old punchy behavior with a slight optimization.
|
||||||
|
// We aren't receiving traffic but we are sending it, punch on all known
|
||||||
|
// ips in case we need to re-prime NAT state
|
||||||
|
n.sendPunch(hostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
|
hostinfo.logger(n.l).
|
||||||
|
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
||||||
|
Debug("Tunnel status")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
||||||
|
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
|
hostinfo.logger(n.l).Debugf("Hostinfo sadness")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
|
||||||
|
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
|
||||||
|
return doNothing, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleInvalidCertificates will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid
|
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
|
||||||
func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool {
|
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
|
||||||
if !n.intf.disconnectInvalid {
|
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
|
||||||
|
// Let's sort this out.
|
||||||
|
|
||||||
|
if current.vpnIp < n.intf.myVpnIp {
|
||||||
|
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
|
||||||
|
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
|
||||||
|
// The remotes vpn ip is lower than mine. I will not flip.
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
certState := n.intf.certState.Load()
|
||||||
|
return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
|
||||||
|
n.hostMap.Lock()
|
||||||
|
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
|
||||||
|
if n.hostMap.Hosts[current.vpnIp] == primary {
|
||||||
|
n.hostMap.unlockedMakePrimary(current)
|
||||||
|
}
|
||||||
|
n.hostMap.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
|
||||||
|
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
|
||||||
|
// check and return true.
|
||||||
|
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
|
||||||
remoteCert := hostinfo.GetCert()
|
remoteCert := hostinfo.GetCert()
|
||||||
if remoteCert == nil {
|
if remoteCert == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
valid, err := remoteCert.Verify(now, n.intf.caPool)
|
valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool)
|
||||||
if valid {
|
if valid {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !n.intf.disconnectInvalid && err != cert.ErrBlockListed {
|
||||||
|
// Block listed certificates should always be disconnected
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
fingerprint, _ := remoteCert.Sha256Sum()
|
fingerprint, _ := remoteCert.Sha256Sum()
|
||||||
n.l.WithField("vpnIp", vpnIp).WithError(err).
|
hostinfo.logger(n.l).WithError(err).
|
||||||
WithField("certName", remoteCert.Details.Name).
|
|
||||||
WithField("fingerprint", fingerprint).
|
WithField("fingerprint", fingerprint).
|
||||||
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
||||||
|
|
||||||
// Inform the remote and close the tunnel locally
|
|
||||||
n.intf.sendCloseTunnel(hostinfo)
|
|
||||||
n.intf.closeTunnel(hostinfo)
|
|
||||||
|
|
||||||
n.ClearIP(vpnIp)
|
|
||||||
n.ClearPendingDeletion(vpnIp)
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
|
||||||
|
if !n.punchy.GetPunch() {
|
||||||
|
// Punching is disabled
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.punchy.GetTargetEverything() {
|
||||||
|
hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
|
||||||
|
n.metricsTxPunchy.Inc(1)
|
||||||
|
n.intf.outside.WriteTo([]byte{1}, addr)
|
||||||
|
})
|
||||||
|
|
||||||
|
} else if hostinfo.remote != nil {
|
||||||
|
n.metricsTxPunchy.Inc(1)
|
||||||
|
n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
|
||||||
|
certState := n.intf.certState.Load()
|
||||||
|
if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
n.l.WithField("vpnIp", hostinfo.vpnIp).
|
||||||
|
WithField("reason", "local certificate is not current").
|
||||||
|
Info("Re-handshaking with remote")
|
||||||
|
|
||||||
|
//TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out
|
||||||
|
newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
|
||||||
|
if !newHostinfo.HandshakeReady {
|
||||||
|
ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
//If this is a static host, we don't need to wait for the HostQueryReply
|
||||||
|
//We can trigger the handshake right now
|
||||||
|
if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
|
||||||
|
select {
|
||||||
|
case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
@@ -18,6 +19,20 @@ import (
|
|||||||
|
|
||||||
var vpnIp iputil.VpnIp
|
var vpnIp iputil.VpnIp
|
||||||
|
|
||||||
|
func newTestLighthouse() *LightHouse {
|
||||||
|
lh := &LightHouse{
|
||||||
|
l: test.NewLogger(),
|
||||||
|
addrMap: map[iputil.VpnIp]*RemoteList{},
|
||||||
|
}
|
||||||
|
lighthouses := map[iputil.VpnIp]struct{}{}
|
||||||
|
staticList := map[iputil.VpnIp]struct{}{}
|
||||||
|
|
||||||
|
lh.lighthouses.Store(&lighthouses)
|
||||||
|
lh.staticList.Store(&staticList)
|
||||||
|
|
||||||
|
return lh
|
||||||
|
}
|
||||||
|
|
||||||
func Test_NewConnectionManagerTest(t *testing.T) {
|
func Test_NewConnectionManagerTest(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||||
@@ -35,57 +50,67 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||||||
rawCertificateNoKey: []byte{},
|
rawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
|
lh := newTestLighthouse()
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &test.NoopTun{},
|
inside: &test.NoopTun{},
|
||||||
outside: &udp.Conn{},
|
outside: &udp.Conn{},
|
||||||
certState: cs,
|
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||||
l: l,
|
l: l,
|
||||||
}
|
}
|
||||||
now := time.Now()
|
ifce.certState.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
nc.HandleMonitorTick(now, p, nb, out)
|
|
||||||
// Add an ip we have established a connection w/ to hostmap
|
// Add an ip we have established a connection w/ to hostmap
|
||||||
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
hostinfo := &HostInfo{
|
||||||
|
vpnIp: vpnIp,
|
||||||
|
localIndexId: 1099,
|
||||||
|
remoteIndexId: 9901,
|
||||||
|
}
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
certState: cs,
|
certState: cs,
|
||||||
H: &noise.HandshakeState{},
|
H: &noise.HandshakeState{},
|
||||||
}
|
}
|
||||||
|
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||||
|
|
||||||
// We saw traffic out to vpnIp
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(vpnIp)
|
nc.Out(hostinfo.localIndexId)
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
nc.In(hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
// Move ahead 5s. Nothing should happen
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
next_tick := now.Add(5 * time.Second)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
assert.Contains(t, nc.out, hostinfo.localIndexId)
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// Move ahead 6s. We haven't heard back
|
|
||||||
next_tick = now.Add(6 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// This host should now be up for deletion
|
|
||||||
assert.Contains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
// Move ahead some more
|
|
||||||
next_tick = now.Add(45 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// The host should be evicted
|
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.NotContains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
|
|
||||||
|
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
|
||||||
|
// Do another traffic check tick, this host should be pending deletion now
|
||||||
|
nc.Out(hostinfo.localIndexId)
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
|
||||||
|
// Do a final traffic check tick, the host should now be removed
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_NewConnectionManagerTest2(t *testing.T) {
|
func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||||
@@ -104,59 +129,69 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||||||
rawCertificateNoKey: []byte{},
|
rawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
|
lh := newTestLighthouse()
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &test.NoopTun{},
|
inside: &test.NoopTun{},
|
||||||
outside: &udp.Conn{},
|
outside: &udp.Conn{},
|
||||||
certState: cs,
|
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||||
l: l,
|
l: l,
|
||||||
}
|
}
|
||||||
now := time.Now()
|
ifce.certState.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
nc.HandleMonitorTick(now, p, nb, out)
|
|
||||||
// Add an ip we have established a connection w/ to hostmap
|
// Add an ip we have established a connection w/ to hostmap
|
||||||
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
hostinfo := &HostInfo{
|
||||||
|
vpnIp: vpnIp,
|
||||||
|
localIndexId: 1099,
|
||||||
|
remoteIndexId: 9901,
|
||||||
|
}
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
certState: cs,
|
certState: cs,
|
||||||
H: &noise.HandshakeState{},
|
H: &noise.HandshakeState{},
|
||||||
}
|
}
|
||||||
|
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||||
|
|
||||||
// We saw traffic out to vpnIp
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(vpnIp)
|
nc.Out(hostinfo.localIndexId)
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
nc.In(hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
|
||||||
// Move ahead 5s. Nothing should happen
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
next_tick := now.Add(5 * time.Second)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// Move ahead 6s. We haven't heard back
|
|
||||||
next_tick = now.Add(6 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// This host should now be up for deletion
|
|
||||||
assert.Contains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
// We heard back this time
|
|
||||||
nc.In(vpnIp)
|
|
||||||
// Move ahead some more
|
|
||||||
next_tick = now.Add(45 * time.Second)
|
|
||||||
nc.HandleMonitorTick(next_tick, p, nb, out)
|
|
||||||
nc.HandleDeletionTick(next_tick)
|
|
||||||
// The host should be evicted
|
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
|
||||||
|
|
||||||
|
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
|
||||||
|
// Do another traffic check tick, this host should be pending deletion now
|
||||||
|
nc.Out(hostinfo.localIndexId)
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
|
||||||
|
// We saw traffic, should no longer be pending deletion
|
||||||
|
nc.In(hostinfo.localIndexId)
|
||||||
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
|
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||||
|
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we can disconnect the peer.
|
// Check if we can disconnect the peer.
|
||||||
@@ -185,7 +220,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||||||
PublicKey: pubCA,
|
PublicKey: pubCA,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
caCert.Sign(privCA)
|
caCert.Sign(cert.Curve_CURVE25519, privCA)
|
||||||
ncp := &cert.NebulaCAPool{
|
ncp := &cert.NebulaCAPool{
|
||||||
CAs: cert.NewCAPool().CAs,
|
CAs: cert.NewCAPool().CAs,
|
||||||
}
|
}
|
||||||
@@ -204,7 +239,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||||||
Issuer: "ca",
|
Issuer: "ca",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
peerCert.Sign(privCA)
|
peerCert.Sign(cert.Curve_CURVE25519, privCA)
|
||||||
|
|
||||||
cs := &CertState{
|
cs := &CertState{
|
||||||
rawCertificate: []byte{},
|
rawCertificate: []byte{},
|
||||||
@@ -213,12 +248,11 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||||||
rawCertificateNoKey: []byte{},
|
rawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
|
lh := newTestLighthouse()
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &test.NoopTun{},
|
inside: &test.NoopTun{},
|
||||||
outside: &udp.Conn{},
|
outside: &udp.Conn{},
|
||||||
certState: cs,
|
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||||
@@ -226,11 +260,13 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||||||
disconnectInvalid: true,
|
disconnectInvalid: true,
|
||||||
caPool: ncp,
|
caPool: ncp,
|
||||||
}
|
}
|
||||||
|
ifce.certState.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||||
ifce.connectionManager = nc
|
ifce.connectionManager = nc
|
||||||
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
@@ -243,13 +279,13 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||||||
// Check if to disconnect with invalid certificate.
|
// Check if to disconnect with invalid certificate.
|
||||||
// Should be alive.
|
// Should be alive.
|
||||||
nextTick := now.Add(45 * time.Second)
|
nextTick := now.Add(45 * time.Second)
|
||||||
destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
|
invalid := nc.isInvalidCertificate(nextTick, hostinfo)
|
||||||
assert.False(t, destroyed)
|
assert.False(t, invalid)
|
||||||
|
|
||||||
// Move ahead 61s.
|
// Move ahead 61s.
|
||||||
// Check if to disconnect with invalid certificate.
|
// Check if to disconnect with invalid certificate.
|
||||||
// Should be disconnected.
|
// Should be disconnected.
|
||||||
nextTick = now.Add(61 * time.Second)
|
nextTick = now.Add(61 * time.Second)
|
||||||
destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
|
invalid = nc.isInvalidCertificate(nextTick, hostinfo)
|
||||||
assert.True(t, destroyed)
|
assert.True(t, invalid)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,31 +9,43 @@ import (
|
|||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/noiseutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
const ReplayWindow = 1024
|
const ReplayWindow = 1024
|
||||||
|
|
||||||
type ConnectionState struct {
|
type ConnectionState struct {
|
||||||
eKey *NebulaCipherState
|
eKey *NebulaCipherState
|
||||||
dKey *NebulaCipherState
|
dKey *NebulaCipherState
|
||||||
H *noise.HandshakeState
|
H *noise.HandshakeState
|
||||||
certState *CertState
|
certState *CertState
|
||||||
peerCert *cert.NebulaCertificate
|
peerCert *cert.NebulaCertificate
|
||||||
initiator bool
|
initiator bool
|
||||||
atomicMessageCounter uint64
|
messageCounter atomic.Uint64
|
||||||
window *Bits
|
window *Bits
|
||||||
queueLock sync.Mutex
|
queueLock sync.Mutex
|
||||||
writeLock sync.Mutex
|
writeLock sync.Mutex
|
||||||
ready bool
|
ready bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
|
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
|
||||||
cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
|
var dhFunc noise.DHFunc
|
||||||
|
curCertState := f.certState.Load()
|
||||||
|
|
||||||
|
switch curCertState.certificate.Details.Curve {
|
||||||
|
case cert.Curve_CURVE25519:
|
||||||
|
dhFunc = noise.DH25519
|
||||||
|
case cert.Curve_P256:
|
||||||
|
dhFunc = noiseutil.DHP256
|
||||||
|
default:
|
||||||
|
l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
|
||||||
if f.cipher == "chachapoly" {
|
if f.cipher == "chachapoly" {
|
||||||
cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
|
cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
|
||||||
}
|
}
|
||||||
|
|
||||||
curCertState := f.certState
|
|
||||||
static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
|
static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
|
||||||
|
|
||||||
b := NewBits(ReplayWindow)
|
b := NewBits(ReplayWindow)
|
||||||
@@ -70,7 +82,7 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
|
|||||||
return json.Marshal(m{
|
return json.Marshal(m{
|
||||||
"certificate": cs.peerCert,
|
"certificate": cs.peerCert,
|
||||||
"initiator": cs.initiator,
|
"initiator": cs.initiator,
|
||||||
"message_counter": atomic.LoadUint64(&cs.atomicMessageCounter),
|
"message_counter": cs.messageCounter.Load(),
|
||||||
"ready": cs.ready,
|
"ready": cs.ready,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
41
control.go
41
control.go
@@ -5,7 +5,6 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"sync/atomic"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
@@ -62,7 +61,7 @@ func (c *Control) Start() {
|
|||||||
|
|
||||||
// Stop signals nebula to shutdown, returns after the shutdown is complete
|
// Stop signals nebula to shutdown, returns after the shutdown is complete
|
||||||
func (c *Control) Stop() {
|
func (c *Control) Stop() {
|
||||||
// Stop the handshakeManager (and other serivces), to prevent new tunnels from
|
// Stop the handshakeManager (and other services), to prevent new tunnels from
|
||||||
// being created while we're shutting them all down.
|
// being created while we're shutting them all down.
|
||||||
c.cancel()
|
c.cancel()
|
||||||
|
|
||||||
@@ -75,7 +74,7 @@ func (c *Control) Stop() {
|
|||||||
|
|
||||||
// ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
|
// ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
|
||||||
func (c *Control) ShutdownBlock() {
|
func (c *Control) ShutdownBlock() {
|
||||||
sigChan := make(chan os.Signal)
|
sigChan := make(chan os.Signal, 1)
|
||||||
signal.Notify(sigChan, syscall.SIGTERM)
|
signal.Notify(sigChan, syscall.SIGTERM)
|
||||||
signal.Notify(sigChan, syscall.SIGINT)
|
signal.Notify(sigChan, syscall.SIGINT)
|
||||||
|
|
||||||
@@ -96,12 +95,21 @@ func (c *Control) RebindUDPServer() {
|
|||||||
c.f.rebindCount++
|
c.f.rebindCount++
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListHostmap returns details about the actual or pending (handshaking) hostmap
|
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
|
||||||
func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo {
|
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
|
||||||
if pendingMap {
|
if pendingMap {
|
||||||
return listHostMap(c.f.handshakeManager.pendingHostMap)
|
return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
|
||||||
} else {
|
} else {
|
||||||
return listHostMap(c.f.hostMap)
|
return listHostMapHosts(c.f.hostMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
|
||||||
|
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
|
||||||
|
if pendingMap {
|
||||||
|
return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
|
||||||
|
} else {
|
||||||
|
return listHostMapIndexes(c.f.hostMap)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -190,7 +198,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
|
|||||||
hostInfos := []*HostInfo{}
|
hostInfos := []*HostInfo{}
|
||||||
// Grab the hostMap lock to access the Hosts map
|
// Grab the hostMap lock to access the Hosts map
|
||||||
c.f.hostMap.Lock()
|
c.f.hostMap.Lock()
|
||||||
for _, relayHost := range c.f.hostMap.Hosts {
|
for _, relayHost := range c.f.hostMap.Indexes {
|
||||||
if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
|
if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
|
||||||
hostInfos = append(hostInfos, relayHost)
|
hostInfos = append(hostInfos, relayHost)
|
||||||
}
|
}
|
||||||
@@ -219,7 +227,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if h.ConnectionState != nil {
|
if h.ConnectionState != nil {
|
||||||
chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter)
|
chi.MessageCounter = h.ConnectionState.messageCounter.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
if c := h.GetCert(); c != nil {
|
if c := h.GetCert(); c != nil {
|
||||||
@@ -233,7 +241,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
|
|||||||
return chi
|
return chi
|
||||||
}
|
}
|
||||||
|
|
||||||
func listHostMap(hm *HostMap) []ControlHostInfo {
|
func listHostMapHosts(hm *HostMap) []ControlHostInfo {
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
hosts := make([]ControlHostInfo, len(hm.Hosts))
|
hosts := make([]ControlHostInfo, len(hm.Hosts))
|
||||||
i := 0
|
i := 0
|
||||||
@@ -245,3 +253,16 @@ func listHostMap(hm *HostMap) []ControlHostInfo {
|
|||||||
|
|
||||||
return hosts
|
return hosts
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
|
||||||
|
hm.RLock()
|
||||||
|
hosts := make([]ControlHostInfo, len(hm.Indexes))
|
||||||
|
i := 0
|
||||||
|
for _, v := range hm.Indexes {
|
||||||
|
hosts[i] = copyHostInfo(v, hm.preferredRanges)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
hm.RUnlock()
|
||||||
|
|
||||||
|
return hosts
|
||||||
|
}
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||||||
Signature: []byte{1, 2, 1, 2, 1, 3},
|
Signature: []byte{1, 2, 1, 2, 1, 3},
|
||||||
}
|
}
|
||||||
|
|
||||||
remotes := NewRemoteList()
|
remotes := NewRemoteList(nil)
|
||||||
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
|
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
|
||||||
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
|
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
|
||||||
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
|
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ package nebula
|
|||||||
import (
|
import (
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
|
||||||
"github.com/google/gopacket"
|
"github.com/google/gopacket"
|
||||||
"github.com/google/gopacket/layers"
|
"github.com/google/gopacket/layers"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
@@ -14,7 +16,7 @@ import (
|
|||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// WaitForTypeByIndex will pipe all messages from this control device into the pipeTo control device
|
// WaitForType will pipe all messages from this control device into the pipeTo control device
|
||||||
// returning after a message matching the criteria has been piped
|
// returning after a message matching the criteria has been piped
|
||||||
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
@@ -153,3 +155,25 @@ func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
|
|||||||
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
|
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetHostmap() *HostMap {
|
||||||
|
return c.f.hostMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetCert() *cert.NebulaCertificate {
|
||||||
|
return c.f.certState.Load().certificate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
|
||||||
|
hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo)
|
||||||
|
ixHandshakeStage0(c.f, vpnIp, hostinfo)
|
||||||
|
|
||||||
|
// If this is a static host, we don't need to wait for the HostQueryReply
|
||||||
|
// We can trigger the handshake right now
|
||||||
|
if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
|
||||||
|
select {
|
||||||
|
case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
4
dist/arch/nebula.service
vendored
4
dist/arch/nebula.service
vendored
@@ -1,6 +1,6 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=nebula
|
Description=Nebula overlay networking tool
|
||||||
Wants=basic.target network-online.target
|
Wants=basic.target network-online.target nss-lookup.target time-sync.target
|
||||||
After=basic.target network.target network-online.target
|
After=basic.target network.target network-online.target
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
|
|||||||
5
dist/fedora/nebula.service
vendored
5
dist/fedora/nebula.service
vendored
@@ -1,15 +1,14 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Nebula overlay networking tool
|
Description=Nebula overlay networking tool
|
||||||
|
Wants=basic.target network-online.target nss-lookup.target time-sync.target
|
||||||
After=basic.target network.target network-online.target
|
After=basic.target network.target network-online.target
|
||||||
Before=sshd.service
|
Before=sshd.service
|
||||||
Wants=basic.target network-online.target
|
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
|
SyslogIdentifier=nebula
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
||||||
Restart=always
|
Restart=always
|
||||||
SyslogIdentifier=nebula
|
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
@@ -33,11 +34,10 @@ func newDnsRecords(hostMap *HostMap) *dnsRecords {
|
|||||||
|
|
||||||
func (d *dnsRecords) Query(data string) string {
|
func (d *dnsRecords) Query(data string) string {
|
||||||
d.RLock()
|
d.RLock()
|
||||||
if r, ok := d.dnsMap[data]; ok {
|
defer d.RUnlock()
|
||||||
d.RUnlock()
|
if r, ok := d.dnsMap[strings.ToLower(data)]; ok {
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
d.RUnlock()
|
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,8 +62,8 @@ func (d *dnsRecords) QueryCert(data string) string {
|
|||||||
|
|
||||||
func (d *dnsRecords) Add(host, data string) {
|
func (d *dnsRecords) Add(host, data string) {
|
||||||
d.Lock()
|
d.Lock()
|
||||||
d.dnsMap[host] = data
|
defer d.Unlock()
|
||||||
d.Unlock()
|
d.dnsMap[strings.ToLower(host)] = data
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
|
func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
|
||||||
|
|||||||
@@ -4,32 +4,59 @@
|
|||||||
package e2e
|
package e2e
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula"
|
"github.com/slackhq/nebula"
|
||||||
"github.com/slackhq/nebula/e2e/router"
|
"github.com/slackhq/nebula/e2e/router"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func BenchmarkHotPath(b *testing.B) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r := router.NewR(b, myControl, theirControl)
|
||||||
|
r.CancelFlowLogs()
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
_ = r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
}
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
func TestGoodHandshake(t *testing.T) {
|
func TestGoodHandshake(t *testing.T) {
|
||||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Put their info in our lighthouse
|
// Put their info in our lighthouse
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Start the servers
|
// Start the servers
|
||||||
myControl.Start()
|
myControl.Start()
|
||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
|
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
|
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
|
||||||
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
|
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
|
||||||
@@ -50,17 +77,18 @@ func TestGoodHandshake(t *testing.T) {
|
|||||||
myControl.WaitForType(1, 0, theirControl)
|
myControl.WaitForType(1, 0, theirControl)
|
||||||
|
|
||||||
t.Log("Make sure our host infos are correct")
|
t.Log("Make sure our host infos are correct")
|
||||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
|
||||||
|
|
||||||
t.Log("Get that cached packet and make sure it looks right")
|
t.Log("Get that cached packet and make sure it looks right")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("Do a bidirectional tunnel test")
|
t.Log("Do a bidirectional tunnel test")
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
defer r.RenderFlow()
|
defer r.RenderFlow()
|
||||||
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
//TODO: assert hostmaps
|
//TODO: assert hostmaps
|
||||||
@@ -72,15 +100,15 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
// The IPs here are chosen on purpose:
|
// The IPs here are chosen on purpose:
|
||||||
// The current remote handling will sort by preference, public, and then lexically.
|
// The current remote handling will sort by preference, public, and then lexically.
|
||||||
// So we need them to have a higher address than evil (we could apply a preference though)
|
// So we need them to have a higher address than evil (we could apply a preference though)
|
||||||
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
|
||||||
evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
|
evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Add their real udp addr, which should be tried after evil.
|
// Add their real udp addr, which should be tried after evil.
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
|
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl, evilControl)
|
r := router.NewR(t, myControl, theirControl, evilControl)
|
||||||
@@ -92,7 +120,7 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
evilControl.Start()
|
evilControl.Start()
|
||||||
|
|
||||||
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
err := h.Parse(p.Data)
|
err := h.Parse(p.Data)
|
||||||
@@ -111,34 +139,38 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
|
|
||||||
t.Log("My cached packet should be received by them")
|
t.Log("My cached packet should be received by them")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("Test the tunnel with them")
|
t.Log("Test the tunnel with them")
|
||||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
|
||||||
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
t.Log("Flush all packets from all controllers")
|
t.Log("Flush all packets from all controllers")
|
||||||
r.FlushAll()
|
r.FlushAll()
|
||||||
|
|
||||||
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
||||||
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil")
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil")
|
||||||
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil")
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil")
|
||||||
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
||||||
|
|
||||||
//TODO: assert hostmaps for everyone
|
//TODO: assert hostmaps for everyone
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl, evilControl)
|
||||||
t.Log("Success!")
|
t.Log("Success!")
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_Case1_Stage1Race(t *testing.T) {
|
func TestStage1Race(t *testing.T) {
|
||||||
|
// This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow
|
||||||
|
// But will eventually collapse down to a single tunnel
|
||||||
|
|
||||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Put their info in our lighthouse and vice versa
|
// Put their info in our lighthouse and vice versa
|
||||||
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr)
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
@@ -149,8 +181,8 @@ func Test_Case1_Stage1Race(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake to start on both me and them")
|
t.Log("Trigger a handshake to start on both me and them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them"))
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
t.Log("Get both stage 1 handshake packets")
|
t.Log("Get both stage 1 handshake packets")
|
||||||
myHsForThem := myControl.GetFromUDP(true)
|
myHsForThem := myControl.GetFromUDP(true)
|
||||||
@@ -159,43 +191,165 @@ func Test_Case1_Stage1Race(t *testing.T) {
|
|||||||
r.Log("Now inject both stage 1 handshake packets")
|
r.Log("Now inject both stage 1 handshake packets")
|
||||||
r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
|
r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
|
||||||
r.InjectUDPPacket(myControl, theirControl, myHsForThem)
|
r.InjectUDPPacket(myControl, theirControl, myHsForThem)
|
||||||
//TODO: they should win, grab their index for me and make sure I use it in the end.
|
|
||||||
|
|
||||||
r.Log("They should not have a stage 2 (won the race) but I should send one")
|
r.Log("Route until they receive a message packet")
|
||||||
r.InjectUDPPacket(myControl, theirControl, myControl.GetFromUDP(true))
|
myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
r.Log("Route for me until I send a message packet to them")
|
r.Log("Their cached packet should be received by me")
|
||||||
r.RouteForAllUntilAfterMsgTypeTo(theirControl, header.Message, header.MessageNone)
|
theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("My cached packet should be received by them")
|
r.Log("Do a bidirectional tunnel test")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
|
||||||
|
|
||||||
t.Log("Route for them until I send a message packet to me")
|
myHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
theirControl.WaitForType(1, 0, myControl)
|
myHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
t.Log("Their cached packet should be received by me")
|
// We should have two tunnels on both sides
|
||||||
theirCachedPacket := myControl.GetFromTun(true)
|
assert.Len(t, myHostmapHosts, 1)
|
||||||
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80)
|
assert.Len(t, theirHostmapHosts, 1)
|
||||||
|
assert.Len(t, myHostmapIndexes, 2)
|
||||||
|
assert.Len(t, theirHostmapIndexes, 2)
|
||||||
|
|
||||||
t.Log("Do a bidirectional tunnel test")
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
|
||||||
|
|
||||||
|
r.Log("Spin until connection manager tears down a tunnel")
|
||||||
|
|
||||||
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
|
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
|
// We should only have a single tunnel now on both sides
|
||||||
|
assert.Len(t, myFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, myFinalHostmapIndexes, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapIndexes, 1)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
//TODO: assert hostmaps
|
}
|
||||||
|
|
||||||
|
func TestUncleanShutdownRaceLoser(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from me to them")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
|
r.Log("Nuke my hostmap")
|
||||||
|
myHostmap := myControl.GetHostmap()
|
||||||
|
myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
|
||||||
|
myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again"))
|
||||||
|
p = r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.Log("Wait for the dead index to go away")
|
||||||
|
start := len(theirControl.GetHostmap().Indexes)
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
if len(theirControl.GetHostmap().Indexes) < start {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUncleanShutdownRaceWinner(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from me to them")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Nuke my hostmap")
|
||||||
|
theirHostmap := theirControl.GetHostmap()
|
||||||
|
theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
|
||||||
|
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again"))
|
||||||
|
p = r.RouteForAllUntilTxTun(myControl)
|
||||||
|
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.Log("Wait for the dead index to go away")
|
||||||
|
start := len(myControl.GetHostmap().Indexes)
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
if len(myControl.GetHostmap().Indexes) < start {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRelays(t *testing.T) {
|
func TestRelays(t *testing.T) {
|
||||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIp, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
relayControl, relayVpnIp, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(relayVpnIp, relayUdpAddr)
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
myControl.InjectRelays(theirVpnIp, []net.IP{relayVpnIp})
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
@@ -207,11 +361,510 @@ func TestRelays(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake from me to them via the relay")
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80)
|
r.Log("Assert the tunnel works")
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||||
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStage1RaceRelays(t *testing.T) {
|
||||||
|
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between me and relay")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between them and relay")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
r.Log("Wait for a packet from them to me")
|
||||||
|
p := r.RouteForAllUntilTxTun(myControl)
|
||||||
|
_ = p
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
relayControl.Stop()
|
||||||
|
//
|
||||||
|
////TODO: assert hostmaps
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStage1RaceRelays2(t *testing.T) {
|
||||||
|
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
l := NewTestLogger()
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between me and relay")
|
||||||
|
l.Info("Get a tunnel between me and relay")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Get a tunnel between them and relay")
|
||||||
|
l.Info("Get a tunnel between them and relay")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
|
||||||
|
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
||||||
|
l.Info("Trigger a handshake from both them and me via relay to them and me")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
|
||||||
|
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
|
||||||
|
|
||||||
|
r.Log("Wait for a packet from them to me")
|
||||||
|
l.Info("Wait for a packet from them to me; myControl")
|
||||||
|
r.RouteForAllUntilTxTun(myControl)
|
||||||
|
l.Info("Wait for a packet from them to me; theirControl")
|
||||||
|
r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
l.Info("Assert the tunnel works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
|
||||||
|
t.Log("Wait until we remove extra tunnels")
|
||||||
|
l.Info("Wait until we remove extra tunnels")
|
||||||
|
l.WithFields(
|
||||||
|
logrus.Fields{
|
||||||
|
"myControl": len(myControl.GetHostmap().Indexes),
|
||||||
|
"theirControl": len(theirControl.GetHostmap().Indexes),
|
||||||
|
"relayControl": len(relayControl.GetHostmap().Indexes),
|
||||||
|
}).Info("Waiting for hostinfos to be removed...")
|
||||||
|
hostInfos := len(myControl.GetHostmap().Indexes) + len(theirControl.GetHostmap().Indexes) + len(relayControl.GetHostmap().Indexes)
|
||||||
|
retries := 60
|
||||||
|
for hostInfos > 6 && retries > 0 {
|
||||||
|
hostInfos = len(myControl.GetHostmap().Indexes) + len(theirControl.GetHostmap().Indexes) + len(relayControl.GetHostmap().Indexes)
|
||||||
|
l.WithFields(
|
||||||
|
logrus.Fields{
|
||||||
|
"myControl": len(myControl.GetHostmap().Indexes),
|
||||||
|
"theirControl": len(theirControl.GetHostmap().Indexes),
|
||||||
|
"relayControl": len(relayControl.GetHostmap().Indexes),
|
||||||
|
}).Info("Waiting for hostinfos to be removed...")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
retries--
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
l.Info("Assert the tunnel works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
relayControl.Stop()
|
||||||
|
|
||||||
|
//
|
||||||
|
////TODO: assert hostmaps
|
||||||
|
}
|
||||||
|
func TestRehandshakingRelays(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
|
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
relayControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
r.Log("Assert the tunnel works")
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
|
||||||
|
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
|
||||||
|
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
|
||||||
|
r.Log("Renew relay certificate and spin until me and them sees it")
|
||||||
|
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
relayConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(myNextPEM),
|
||||||
|
"key": string(myNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(relayConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
relayConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
r.Log("Certificate between my and relay is updated!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
r.Log("Certificate between their and relay is updated!")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
// We should have two hostinfos on all sides
|
||||||
|
for len(myControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("myControl hostinfos got cleaned up!")
|
||||||
|
for len(theirControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("theirControl hostinfos got cleaned up!")
|
||||||
|
for len(relayControl.GetHostmap().Indexes) != 2 {
|
||||||
|
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
|
||||||
|
r.Log("Assert the relay tunnel still works")
|
||||||
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
r.Log("yupitdoes")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
t.Logf("relayControl hostinfos got cleaned up!")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRehandshaking(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse and vice versa
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Stand up a tunnel between me and them")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Renew my certificate and spin until their sees it")
|
||||||
|
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
myConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(myNextPEM),
|
||||||
|
"key": string(myNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(myConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
myConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
|
||||||
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
|
// We have a new certificate now
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flip their firewall to only allowing the new group to catch the tunnels reverting incorrectly
|
||||||
|
rc, err = yaml.Marshal(theirConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
var theirNewConfig m
|
||||||
|
assert.NoError(t, yaml.Unmarshal(rc, &theirNewConfig))
|
||||||
|
theirFirewall := theirNewConfig["firewall"].(map[interface{}]interface{})
|
||||||
|
theirFirewall["inbound"] = []m{{
|
||||||
|
"proto": "any",
|
||||||
|
"port": "any",
|
||||||
|
"group": "new group",
|
||||||
|
}}
|
||||||
|
rc, err = yaml.Marshal(theirNewConfig)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
theirConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
r.Log("Spin until there is only 1 tunnel")
|
||||||
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
|
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
|
// Make sure the correct tunnel won
|
||||||
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
|
||||||
|
assert.Contains(t, c.Cert.Details.Groups, "new group")
|
||||||
|
|
||||||
|
// We should only have a single tunnel now on both sides
|
||||||
|
assert.Len(t, myFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, myFinalHostmapIndexes, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapIndexes, 1)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRehandshakingLoser(t *testing.T) {
|
||||||
|
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
|
||||||
|
// Should be the one with the new certificate
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse and vice versa
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Stand up a tunnel between me and them")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
tt1 := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
|
||||||
|
tt2 := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
|
||||||
|
fmt.Println(tt1.LocalIndex, tt2.LocalIndex)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Renew their certificate and spin until mine sees it")
|
||||||
|
_, _, theirNextPrivKey, theirNextPEM := newTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
|
||||||
|
|
||||||
|
caB, err := ca.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
theirConfig.Settings["pki"] = m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(theirNextPEM),
|
||||||
|
"key": string(theirNextPrivKey),
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(theirConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
theirConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
|
||||||
|
|
||||||
|
_, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"]
|
||||||
|
if theirNewGroup {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flip my firewall to only allowing the new group to catch the tunnels reverting incorrectly
|
||||||
|
rc, err = yaml.Marshal(myConfig.Settings)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
var myNewConfig m
|
||||||
|
assert.NoError(t, yaml.Unmarshal(rc, &myNewConfig))
|
||||||
|
theirFirewall := myNewConfig["firewall"].(map[interface{}]interface{})
|
||||||
|
theirFirewall["inbound"] = []m{{
|
||||||
|
"proto": "any",
|
||||||
|
"port": "any",
|
||||||
|
"group": "their new group",
|
||||||
|
}}
|
||||||
|
rc, err = yaml.Marshal(myNewConfig)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
myConfig.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
r.Log("Spin until there is only 1 tunnel")
|
||||||
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
|
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
|
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
|
||||||
|
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
|
||||||
|
|
||||||
|
// Make sure the correct tunnel won
|
||||||
|
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
|
||||||
|
assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group")
|
||||||
|
|
||||||
|
// We should only have a single tunnel now on both sides
|
||||||
|
assert.Len(t, myFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapHosts, 1)
|
||||||
|
assert.Len(t, myFinalHostmapIndexes, 1)
|
||||||
|
assert.Len(t, theirFinalHostmapIndexes, 1)
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRaceRegression(t *testing.T) {
|
||||||
|
// This test forces stage 1, stage 2, stage 1 to be received by me from them
|
||||||
|
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
|
||||||
|
// caused a cross-linked hostinfo
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
//them rx stage:1 initiatorIndex=642843150 responderIndex=0
|
||||||
|
//me rx stage:1 initiatorIndex=120607833 responderIndex=0
|
||||||
|
//them rx stage:1 initiatorIndex=642843150 responderIndex=0
|
||||||
|
//me rx stage:2 initiatorIndex=642843150 responderIndex=3701775874
|
||||||
|
//me rx stage:1 initiatorIndex=120607833 responderIndex=0
|
||||||
|
//them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089
|
||||||
|
|
||||||
|
t.Log("Start both handshakes")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
t.Log("Get both stage 1")
|
||||||
|
myStage1ForThem := myControl.GetFromUDP(true)
|
||||||
|
theirStage1ForMe := theirControl.GetFromUDP(true)
|
||||||
|
|
||||||
|
t.Log("Inject them in a special way")
|
||||||
|
theirControl.InjectUDPPacket(myStage1ForThem)
|
||||||
|
myControl.InjectUDPPacket(theirStage1ForMe)
|
||||||
|
theirControl.InjectUDPPacket(myStage1ForThem)
|
||||||
|
|
||||||
|
//TODO: ensure stage 2
|
||||||
|
t.Log("Get both stage 2")
|
||||||
|
myStage2ForThem := myControl.GetFromUDP(true)
|
||||||
|
theirStage2ForMe := theirControl.GetFromUDP(true)
|
||||||
|
|
||||||
|
t.Log("Inject them in a special way again")
|
||||||
|
myControl.InjectUDPPacket(theirStage2ForMe)
|
||||||
|
myControl.InjectUDPPacket(theirStage1ForMe)
|
||||||
|
theirControl.InjectUDPPacket(myStage2ForThem)
|
||||||
|
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
t.Log("Flush the packets")
|
||||||
|
r.RouteForAllUntilTxTun(myControl)
|
||||||
|
r.RouteForAllUntilTxTun(theirControl)
|
||||||
|
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
t.Log("Make sure the tunnel still works")
|
||||||
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: test
|
||||||
|
// Race winner renews and handshakes
|
||||||
|
// Race loser renews and handshakes
|
||||||
|
// Does race winner repin the cert to old?
|
||||||
//TODO: add a test with many lies
|
//TODO: add a test with many lies
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -31,7 +30,7 @@ import (
|
|||||||
type m map[string]interface{}
|
type m map[string]interface{}
|
||||||
|
|
||||||
// newSimpleServer creates a nebula instance with many assumptions
|
// newSimpleServer creates a nebula instance with many assumptions
|
||||||
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, net.IP, *net.UDPAddr) {
|
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
|
||||||
l := NewTestLogger()
|
l := NewTestLogger()
|
||||||
|
|
||||||
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
|
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
|
||||||
@@ -78,6 +77,10 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
|
|||||||
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
|
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
|
||||||
"level": l.Level.String(),
|
"level": l.Level.String(),
|
||||||
},
|
},
|
||||||
|
"timers": m{
|
||||||
|
"pending_deletion_interval": 2,
|
||||||
|
"connection_alive_interval": 2,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if overrides != nil {
|
if overrides != nil {
|
||||||
@@ -102,7 +105,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return control, vpnIpNet.IP, &udpAddr
|
return control, vpnIpNet, &udpAddr, c
|
||||||
}
|
}
|
||||||
|
|
||||||
// newTestCaCert will generate a CA cert
|
// newTestCaCert will generate a CA cert
|
||||||
@@ -138,7 +141,7 @@ func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []
|
|||||||
nc.Details.Groups = groups
|
nc.Details.Groups = groups
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(priv)
|
err = nc.Sign(cert.Curve_CURVE25519, priv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -184,7 +187,7 @@ func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, af
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nc.Sign(key)
|
err = nc.Sign(ca.Details.Curve, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -232,12 +235,12 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
|
|||||||
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
|
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
|
||||||
// Send a packet from them to me
|
// Send a packet from them to me
|
||||||
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
|
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
|
||||||
bPacket := r.RouteUntilTxTun(controlB, controlA)
|
bPacket := r.RouteForAllUntilTxTun(controlA)
|
||||||
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
|
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
|
||||||
|
|
||||||
// And once more from me to them
|
// And once more from me to them
|
||||||
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
|
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
|
||||||
aPacket := r.RouteUntilTxTun(controlA, controlB)
|
aPacket := r.RouteForAllUntilTxTun(controlB)
|
||||||
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
|
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -304,7 +307,8 @@ func NewTestLogger() *logrus.Logger {
|
|||||||
|
|
||||||
v := os.Getenv("TEST_LOGS")
|
v := os.Getenv("TEST_LOGS")
|
||||||
if v == "" {
|
if v == "" {
|
||||||
l.SetOutput(ioutil.Discard)
|
l.SetOutput(io.Discard)
|
||||||
|
l.SetLevel(logrus.PanicLevel)
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
145
e2e/router/hostmap.go
Normal file
145
e2e/router/hostmap.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package router
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
type edge struct {
|
||||||
|
from string
|
||||||
|
to string
|
||||||
|
dual bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func renderHostmaps(controls ...*nebula.Control) string {
|
||||||
|
var lines []*edge
|
||||||
|
r := "graph TB\n"
|
||||||
|
for _, c := range controls {
|
||||||
|
sr, se := renderHostmap(c)
|
||||||
|
r += sr
|
||||||
|
for _, e := range se {
|
||||||
|
add := true
|
||||||
|
|
||||||
|
// Collapse duplicate edges into a bi-directionally connected edge
|
||||||
|
for _, ge := range lines {
|
||||||
|
if e.to == ge.from && e.from == ge.to {
|
||||||
|
add = false
|
||||||
|
ge.dual = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if add {
|
||||||
|
lines = append(lines, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
if line.dual {
|
||||||
|
r += fmt.Sprintf("\t%v <--> %v\n", line.from, line.to)
|
||||||
|
} else {
|
||||||
|
r += fmt.Sprintf("\t%v --> %v\n", line.from, line.to)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func renderHostmap(c *nebula.Control) (string, []*edge) {
|
||||||
|
var lines []string
|
||||||
|
var globalLines []*edge
|
||||||
|
|
||||||
|
clusterName := strings.Trim(c.GetCert().Details.Name, " ")
|
||||||
|
clusterVpnIp := c.GetCert().Details.Ips[0].IP
|
||||||
|
r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp)
|
||||||
|
|
||||||
|
hm := c.GetHostmap()
|
||||||
|
hm.RLock()
|
||||||
|
defer hm.RUnlock()
|
||||||
|
|
||||||
|
// Draw the vpn to index nodes
|
||||||
|
r += fmt.Sprintf("\t\tsubgraph %s.hosts[\"Hosts (vpn ip to index)\"]\n", clusterName)
|
||||||
|
hosts := sortedHosts(hm.Hosts)
|
||||||
|
for _, vpnIp := range hosts {
|
||||||
|
hi := hm.Hosts[vpnIp]
|
||||||
|
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, vpnIp, vpnIp)
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, hi.GetLocalIndex()))
|
||||||
|
|
||||||
|
rs := hi.GetRelayState()
|
||||||
|
for _, relayIp := range rs.CopyRelayIps() {
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, relayIp := range rs.CopyRelayForIdxs() {
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r += "\t\tend\n"
|
||||||
|
|
||||||
|
// Draw the relay hostinfos
|
||||||
|
if len(hm.Relays) > 0 {
|
||||||
|
r += fmt.Sprintf("\t\tsubgraph %s.relays[\"Relays (relay index to hostinfo)\"]\n", clusterName)
|
||||||
|
for relayIndex, hi := range hm.Relays {
|
||||||
|
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, relayIndex, relayIndex)
|
||||||
|
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, relayIndex, clusterName, hi.GetLocalIndex()))
|
||||||
|
}
|
||||||
|
r += "\t\tend\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Draw the local index to relay or remote index nodes
|
||||||
|
r += fmt.Sprintf("\t\tsubgraph indexes.%s[\"Indexes (index to hostinfo)\"]\n", clusterName)
|
||||||
|
indexes := sortedIndexes(hm.Indexes)
|
||||||
|
for _, idx := range indexes {
|
||||||
|
hi, ok := hm.Indexes[idx]
|
||||||
|
if ok {
|
||||||
|
r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
|
||||||
|
remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
|
||||||
|
globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
|
||||||
|
_ = hi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r += "\t\tend\n"
|
||||||
|
|
||||||
|
// Add the edges inside this host
|
||||||
|
for _, line := range lines {
|
||||||
|
r += fmt.Sprintf("\t\t%v\n", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
r += "\tend\n"
|
||||||
|
return r, globalLines
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
|
||||||
|
keys := make([]iputil.VpnIp, 0, len(hosts))
|
||||||
|
for key := range hosts {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(keys, func(i, j int) bool {
|
||||||
|
return keys[i] > keys[j]
|
||||||
|
})
|
||||||
|
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortedIndexes(indexes map[uint32]*nebula.HostInfo) []uint32 {
|
||||||
|
keys := make([]uint32, 0, len(indexes))
|
||||||
|
for key := range indexes {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(keys, func(i, j int) bool {
|
||||||
|
return keys[i] > keys[j]
|
||||||
|
})
|
||||||
|
|
||||||
|
return keys
|
||||||
|
}
|
||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -22,6 +23,7 @@ import (
|
|||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
)
|
)
|
||||||
|
|
||||||
type R struct {
|
type R struct {
|
||||||
@@ -40,14 +42,37 @@ type R struct {
|
|||||||
// A map of vpn ip to the nebula control it belongs to
|
// A map of vpn ip to the nebula control it belongs to
|
||||||
vpnControls map[iputil.VpnIp]*nebula.Control
|
vpnControls map[iputil.VpnIp]*nebula.Control
|
||||||
|
|
||||||
flow []flowEntry
|
ignoreFlows []ignoreFlow
|
||||||
|
flow []flowEntry
|
||||||
|
|
||||||
|
// A set of additional mermaid graphs to draw in the flow log markdown file
|
||||||
|
// Currently consisting only of hostmap renders
|
||||||
|
additionalGraphs []mermaidGraph
|
||||||
|
|
||||||
// All interactions are locked to help serialize behavior
|
// All interactions are locked to help serialize behavior
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
|
|
||||||
fn string
|
fn string
|
||||||
cancelRender context.CancelFunc
|
cancelRender context.CancelFunc
|
||||||
t *testing.T
|
t testing.TB
|
||||||
|
}
|
||||||
|
|
||||||
|
type ignoreFlow struct {
|
||||||
|
tun NullBool
|
||||||
|
messageType header.MessageType
|
||||||
|
subType header.MessageSubType
|
||||||
|
//from
|
||||||
|
//to
|
||||||
|
}
|
||||||
|
|
||||||
|
type mermaidGraph struct {
|
||||||
|
title string
|
||||||
|
content string
|
||||||
|
}
|
||||||
|
|
||||||
|
type NullBool struct {
|
||||||
|
HasValue bool
|
||||||
|
IsTrue bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type flowEntry struct {
|
type flowEntry struct {
|
||||||
@@ -63,6 +88,12 @@ type packet struct {
|
|||||||
rx bool // the packet was received by a udp device
|
rx bool // the packet was received by a udp device
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *packet) WasReceived() {
|
||||||
|
if p != nil {
|
||||||
|
p.rx = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type ExitType int
|
type ExitType int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -79,7 +110,7 @@ type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType
|
|||||||
// NewR creates a new router to pass packets in a controlled fashion between the provided controllers.
|
// NewR creates a new router to pass packets in a controlled fashion between the provided controllers.
|
||||||
// The packet flow will be recorded in a file within the mermaid directory under the same name as the test.
|
// The packet flow will be recorded in a file within the mermaid directory under the same name as the test.
|
||||||
// Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made
|
// Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made
|
||||||
func NewR(t *testing.T, controls ...*nebula.Control) *R {
|
func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
if err := os.MkdirAll("mermaid", 0755); err != nil {
|
if err := os.MkdirAll("mermaid", 0755); err != nil {
|
||||||
@@ -91,6 +122,8 @@ func NewR(t *testing.T, controls ...*nebula.Control) *R {
|
|||||||
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
|
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
|
||||||
inNat: make(map[string]*nebula.Control),
|
inNat: make(map[string]*nebula.Control),
|
||||||
outNat: make(map[string]net.UDPAddr),
|
outNat: make(map[string]net.UDPAddr),
|
||||||
|
flow: []flowEntry{},
|
||||||
|
ignoreFlows: []ignoreFlow{},
|
||||||
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
|
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
|
||||||
t: t,
|
t: t,
|
||||||
cancelRender: cancel,
|
cancelRender: cancel,
|
||||||
@@ -119,6 +152,7 @@ func NewR(t *testing.T, controls ...*nebula.Control) *R {
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
case <-clockSource.C:
|
case <-clockSource.C:
|
||||||
|
r.renderHostmaps("clock tick")
|
||||||
r.renderFlow()
|
r.renderFlow()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -148,14 +182,24 @@ func (r *R) RenderFlow() {
|
|||||||
r.renderFlow()
|
r.renderFlow()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CancelFlowLogs stops flow logs from being tracked and destroys any logs already collected
|
||||||
|
func (r *R) CancelFlowLogs() {
|
||||||
|
r.cancelRender()
|
||||||
|
r.flow = nil
|
||||||
|
}
|
||||||
|
|
||||||
func (r *R) renderFlow() {
|
func (r *R) renderFlow() {
|
||||||
|
if r.flow == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
|
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var participants = map[string]struct{}{}
|
var participants = map[string]struct{}{}
|
||||||
var participansVals []string
|
var participantsVals []string
|
||||||
|
|
||||||
fmt.Fprintln(f, "```mermaid")
|
fmt.Fprintln(f, "```mermaid")
|
||||||
fmt.Fprintln(f, "sequenceDiagram")
|
fmt.Fprintln(f, "sequenceDiagram")
|
||||||
@@ -171,19 +215,24 @@ func (r *R) renderFlow() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
participants[addr] = struct{}{}
|
participants[addr] = struct{}{}
|
||||||
sanAddr := strings.Replace(addr, ":", "#58;", 1)
|
sanAddr := strings.Replace(addr, ":", "-", 1)
|
||||||
participansVals = append(participansVals, sanAddr)
|
participantsVals = append(participantsVals, sanAddr)
|
||||||
fmt.Fprintf(
|
fmt.Fprintf(
|
||||||
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
|
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
|
||||||
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
|
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(participantsVals) > 2 {
|
||||||
|
// Get the first and last participantVals for notes
|
||||||
|
participantsVals = []string{participantsVals[0], participantsVals[len(participantsVals)-1]}
|
||||||
|
}
|
||||||
|
|
||||||
// Print packets
|
// Print packets
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
for _, e := range r.flow {
|
for _, e := range r.flow {
|
||||||
if e.packet == nil {
|
if e.packet == nil {
|
||||||
fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participansVals, ", "), e.note)
|
//fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -202,15 +251,77 @@ func (r *R) renderFlow() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(f,
|
fmt.Fprintf(f,
|
||||||
" %s%s%s: %s(%s), counter: %v\n",
|
" %s%s%s: %s(%s), index %v, counter: %v\n",
|
||||||
strings.Replace(p.from.GetUDPAddr(), ":", "#58;", 1),
|
strings.Replace(p.from.GetUDPAddr(), ":", "-", 1),
|
||||||
line,
|
line,
|
||||||
strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
|
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
|
||||||
h.TypeName(), h.SubTypeName(), h.MessageCounter,
|
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fmt.Fprintln(f, "```")
|
fmt.Fprintln(f, "```")
|
||||||
|
|
||||||
|
for _, g := range r.additionalGraphs {
|
||||||
|
fmt.Fprintf(f, "## %s\n", g.title)
|
||||||
|
fmt.Fprintln(f, "```mermaid")
|
||||||
|
fmt.Fprintln(f, g.content)
|
||||||
|
fmt.Fprintln(f, "```")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreFlow tells the router to stop recording future flows that matches the provided criteria.
|
||||||
|
// messageType and subType will target nebula underlay packets while tun will target nebula overlay packets
|
||||||
|
// NOTE: This is a very broad system, if you set tun to true then no more tun traffic will be rendered
|
||||||
|
func (r *R) IgnoreFlow(messageType header.MessageType, subType header.MessageSubType, tun NullBool) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
r.ignoreFlows = append(r.ignoreFlows, ignoreFlow{
|
||||||
|
tun,
|
||||||
|
messageType,
|
||||||
|
subType,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
|
||||||
|
s := renderHostmaps(controls...)
|
||||||
|
if len(r.additionalGraphs) > 0 {
|
||||||
|
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
|
||||||
|
if lastGraph.content == s && lastGraph.title == title {
|
||||||
|
// Ignore this rendering if it matches the last rendering added
|
||||||
|
// This is useful if you want to track rendering changes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
|
||||||
|
title: title,
|
||||||
|
content: s,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *R) renderHostmaps(title string) {
|
||||||
|
c := maps.Values(r.controls)
|
||||||
|
sort.SliceStable(c, func(i, j int) bool {
|
||||||
|
return c[i].GetVpnIp() > c[j].GetVpnIp()
|
||||||
|
})
|
||||||
|
|
||||||
|
s := renderHostmaps(c...)
|
||||||
|
if len(r.additionalGraphs) > 0 {
|
||||||
|
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
|
||||||
|
if lastGraph.content == s {
|
||||||
|
// Ignore this rendering if it matches the last rendering added
|
||||||
|
// This is useful if you want to track rendering changes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
|
||||||
|
title: title,
|
||||||
|
content: s,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// InjectFlow can be used to record packet flow if the test is handling the routing on its own.
|
// InjectFlow can be used to record packet flow if the test is handling the routing on its own.
|
||||||
@@ -222,6 +333,10 @@ func (r *R) InjectFlow(from, to *nebula.Control, p *udp.Packet) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *R) Log(arg ...any) {
|
func (r *R) Log(arg ...any) {
|
||||||
|
if r.flow == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
r.Lock()
|
r.Lock()
|
||||||
r.flow = append(r.flow, flowEntry{note: fmt.Sprint(arg...)})
|
r.flow = append(r.flow, flowEntry{note: fmt.Sprint(arg...)})
|
||||||
r.t.Log(arg...)
|
r.t.Log(arg...)
|
||||||
@@ -229,6 +344,10 @@ func (r *R) Log(arg ...any) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *R) Logf(format string, arg ...any) {
|
func (r *R) Logf(format string, arg ...any) {
|
||||||
|
if r.flow == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
r.Lock()
|
r.Lock()
|
||||||
r.flow = append(r.flow, flowEntry{note: fmt.Sprintf(format, arg...)})
|
r.flow = append(r.flow, flowEntry{note: fmt.Sprintf(format, arg...)})
|
||||||
r.t.Logf(format, arg...)
|
r.t.Logf(format, arg...)
|
||||||
@@ -236,14 +355,40 @@ func (r *R) Logf(format string, arg ...any) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// unlockedInjectFlow is used by the router to record a packet has been transmitted, the packet is returned and
|
// unlockedInjectFlow is used by the router to record a packet has been transmitted, the packet is returned and
|
||||||
// should be marked as received AFTER it has been placed on the receivers channel
|
// should be marked as received AFTER it has been placed on the receivers channel.
|
||||||
|
// If flow logs have been disabled this function will return nil
|
||||||
func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool) *packet {
|
func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool) *packet {
|
||||||
|
if r.flow == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
r.renderHostmaps(fmt.Sprintf("Packet %v", len(r.flow)))
|
||||||
|
|
||||||
|
if len(r.ignoreFlows) > 0 {
|
||||||
|
var h header.H
|
||||||
|
err := h.Parse(p.Data)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, i := range r.ignoreFlows {
|
||||||
|
if !tun {
|
||||||
|
if i.messageType == h.Type && i.subType == h.Subtype {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else if i.tun.HasValue && i.tun.IsTrue {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fp := &packet{
|
fp := &packet{
|
||||||
from: from,
|
from: from,
|
||||||
to: to,
|
to: to,
|
||||||
packet: p.Copy(),
|
packet: p.Copy(),
|
||||||
tun: tun,
|
tun: tun,
|
||||||
}
|
}
|
||||||
|
|
||||||
r.flow = append(r.flow, flowEntry{packet: fp})
|
r.flow = append(r.flow, flowEntry{packet: fp})
|
||||||
return fp
|
return fp
|
||||||
}
|
}
|
||||||
@@ -285,7 +430,7 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []
|
|||||||
}
|
}
|
||||||
fp := r.unlockedInjectFlow(sender, c, p, false)
|
fp := r.unlockedInjectFlow(sender, c, p, false)
|
||||||
c.InjectUDPPacket(p)
|
c.InjectUDPPacket(p)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
r.Unlock()
|
r.Unlock()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -344,7 +489,7 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
|
|||||||
}
|
}
|
||||||
fp := r.unlockedInjectFlow(cm[x], c, p, false)
|
fp := r.unlockedInjectFlow(cm[x], c, p, false)
|
||||||
c.InjectUDPPacket(p)
|
c.InjectUDPPacket(p)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
}
|
}
|
||||||
r.Unlock()
|
r.Unlock()
|
||||||
}
|
}
|
||||||
@@ -381,14 +526,14 @@ func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
|
|||||||
case RouteAndExit:
|
case RouteAndExit:
|
||||||
fp := r.unlockedInjectFlow(sender, receiver, p, false)
|
fp := r.unlockedInjectFlow(sender, receiver, p, false)
|
||||||
receiver.InjectUDPPacket(p)
|
receiver.InjectUDPPacket(p)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
r.Unlock()
|
r.Unlock()
|
||||||
return
|
return
|
||||||
|
|
||||||
case KeepRouting:
|
case KeepRouting:
|
||||||
fp := r.unlockedInjectFlow(sender, receiver, p, false)
|
fp := r.unlockedInjectFlow(sender, receiver, p, false)
|
||||||
receiver.InjectUDPPacket(p)
|
receiver.InjectUDPPacket(p)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
|
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
|
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
|
||||||
@@ -439,7 +584,7 @@ func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet
|
|||||||
|
|
||||||
fp := r.unlockedInjectFlow(sender, receiver, packet, false)
|
fp := r.unlockedInjectFlow(sender, receiver, packet, false)
|
||||||
receiver.InjectUDPPacket(packet)
|
receiver.InjectUDPPacket(packet)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
}
|
}
|
||||||
|
|
||||||
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
|
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
|
||||||
@@ -503,14 +648,14 @@ func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
|
|||||||
case RouteAndExit:
|
case RouteAndExit:
|
||||||
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
|
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
|
||||||
receiver.InjectUDPPacket(p)
|
receiver.InjectUDPPacket(p)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
r.Unlock()
|
r.Unlock()
|
||||||
return
|
return
|
||||||
|
|
||||||
case KeepRouting:
|
case KeepRouting:
|
||||||
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
|
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
|
||||||
receiver.InjectUDPPacket(p)
|
receiver.InjectUDPPacket(p)
|
||||||
fp.rx = true
|
fp.WasReceived()
|
||||||
|
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
|
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
|
||||||
@@ -613,8 +758,8 @@ func (r *R) formatUdpPacket(p *packet) string {
|
|||||||
data := packet.ApplicationLayer()
|
data := packet.ApplicationLayer()
|
||||||
return fmt.Sprintf(
|
return fmt.Sprintf(
|
||||||
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
|
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
|
||||||
strings.Replace(from, ":", "#58;", 1),
|
strings.Replace(from, ":", "-", 1),
|
||||||
strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
|
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
|
||||||
udp.SrcPort,
|
udp.SrcPort,
|
||||||
udp.DstPort,
|
udp.DstPort,
|
||||||
string(data.Payload()),
|
string(data.Payload()),
|
||||||
|
|||||||
@@ -47,8 +47,9 @@ lighthouse:
|
|||||||
# allowed. You can provide CIDRs here with `true` to allow and `false` to
|
# allowed. You can provide CIDRs here with `true` to allow and `false` to
|
||||||
# deny. The most specific CIDR rule applies to each remote. If all rules are
|
# deny. The most specific CIDR rule applies to each remote. If all rules are
|
||||||
# "allow", the default will be "deny", and vice-versa. If both "allow" and
|
# "allow", the default will be "deny", and vice-versa. If both "allow" and
|
||||||
# "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
|
# "deny" IPv4 rules are present, then you MUST set a rule for "0.0.0.0/0" as
|
||||||
# default.
|
# the default. Similarly if both "allow" and "deny" IPv6 rules are present,
|
||||||
|
# then you MUST set a rule for "::/0" as the default.
|
||||||
#remote_allow_list:
|
#remote_allow_list:
|
||||||
# Example to block IPs from this subnet from being used for remote IPs.
|
# Example to block IPs from this subnet from being used for remote IPs.
|
||||||
#"172.16.0.0/12": false
|
#"172.16.0.0/12": false
|
||||||
@@ -58,7 +59,7 @@ lighthouse:
|
|||||||
#"10.0.0.0/8": false
|
#"10.0.0.0/8": false
|
||||||
#"10.42.42.0/24": true
|
#"10.42.42.0/24": true
|
||||||
|
|
||||||
# EXPERIMENTAL: This option my change or disappear in the future.
|
# EXPERIMENTAL: This option may change or disappear in the future.
|
||||||
# Optionally allows the definition of remote_allow_list blocks
|
# Optionally allows the definition of remote_allow_list blocks
|
||||||
# specific to an inside VPN IP CIDR.
|
# specific to an inside VPN IP CIDR.
|
||||||
#remote_allow_ranges:
|
#remote_allow_ranges:
|
||||||
@@ -90,10 +91,23 @@ lighthouse:
|
|||||||
#- "1.1.1.1:4242"
|
#- "1.1.1.1:4242"
|
||||||
#- "1.2.3.4:0" # port will be replaced with the real listening port
|
#- "1.2.3.4:0" # port will be replaced with the real listening port
|
||||||
|
|
||||||
|
# EXPERIMENTAL: This option may change or disappear in the future.
|
||||||
|
# This setting allows us to "guess" what the remote might be for a host
|
||||||
|
# while we wait for the lighthouse response.
|
||||||
|
#calculated_remotes:
|
||||||
|
# For any Nebula IPs in 10.0.10.0/24, this will apply the mask and add
|
||||||
|
# the calculated IP as an initial remote (while we wait for the response
|
||||||
|
# from the lighthouse). Both CIDRs must have the same mask size.
|
||||||
|
# For example, Nebula IP 10.0.10.123 will have a calculated remote of
|
||||||
|
# 192.168.1.123
|
||||||
|
#10.0.10.0/24:
|
||||||
|
#- mask: 192.168.1.0/24
|
||||||
|
# port: 4242
|
||||||
|
|
||||||
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
||||||
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
||||||
listen:
|
listen:
|
||||||
# To listen on both any ipv4 and ipv6 use "[::]"
|
# To listen on both any ipv4 and ipv6 use "::"
|
||||||
host: 0.0.0.0
|
host: 0.0.0.0
|
||||||
port: 4242
|
port: 4242
|
||||||
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
||||||
@@ -128,12 +142,15 @@ punchy:
|
|||||||
# Default is false
|
# Default is false
|
||||||
#respond: true
|
#respond: true
|
||||||
|
|
||||||
# delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
|
# delays a punch response for misbehaving NATs, default is 1 second.
|
||||||
#delay: 1s
|
#delay: 1s
|
||||||
|
|
||||||
|
# set the delay before attempting punchy.respond. Default is 5 seconds. respond must be true to take effect.
|
||||||
|
#respond_delay: 5s
|
||||||
|
|
||||||
# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
|
# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
|
||||||
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
|
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
|
||||||
#cipher: chachapoly
|
#cipher: aes
|
||||||
|
|
||||||
# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
|
# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
|
||||||
# path to a network adjacent nebula node.
|
# path to a network adjacent nebula node.
|
||||||
@@ -187,21 +204,28 @@ tun:
|
|||||||
tx_queue: 500
|
tx_queue: 500
|
||||||
# Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
|
# Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
|
||||||
mtu: 1300
|
mtu: 1300
|
||||||
|
|
||||||
# Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
|
# Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
|
||||||
routes:
|
routes:
|
||||||
#- mtu: 8800
|
#- mtu: 8800
|
||||||
# route: 10.0.0.0/16
|
# route: 10.0.0.0/16
|
||||||
|
|
||||||
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
||||||
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
||||||
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
|
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
|
||||||
# `mtu` will default to tun mtu if this option is not specified
|
# `mtu`: will default to tun mtu if this option is not specified
|
||||||
# `metric` will default to 0 if this option is not specified
|
# `metric`: will default to 0 if this option is not specified
|
||||||
|
# `install`: will default to true, controls whether this route is installed in the systems routing table.
|
||||||
unsafe_routes:
|
unsafe_routes:
|
||||||
#- route: 172.16.1.0/24
|
#- route: 172.16.1.0/24
|
||||||
# via: 192.168.100.99
|
# via: 192.168.100.99
|
||||||
# mtu: 1300
|
# mtu: 1300
|
||||||
# metric: 100
|
# metric: 100
|
||||||
|
# install: true
|
||||||
|
|
||||||
|
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
|
||||||
|
# in nebula configuration files. Default false, not reloadable.
|
||||||
|
#use_system_route_table: false
|
||||||
|
|
||||||
# TODO
|
# TODO
|
||||||
# Configure logging level
|
# Configure logging level
|
||||||
@@ -258,6 +282,15 @@ logging:
|
|||||||
|
|
||||||
# Nebula security group configuration
|
# Nebula security group configuration
|
||||||
firewall:
|
firewall:
|
||||||
|
# Action to take when a packet is not allowed by the firewall rules.
|
||||||
|
# Can be one of:
|
||||||
|
# `drop` (default): silently drop the packet.
|
||||||
|
# `reject`: send a reject reply.
|
||||||
|
# - For TCP, this will be a RST "Connection Reset" packet.
|
||||||
|
# - For other protocols, this will be an ICMP port unreachable packet.
|
||||||
|
outbound_action: drop
|
||||||
|
inbound_action: drop
|
||||||
|
|
||||||
conntrack:
|
conntrack:
|
||||||
tcp_timeout: 12m
|
tcp_timeout: 12m
|
||||||
udp_timeout: 3m
|
udp_timeout: 3m
|
||||||
@@ -272,7 +305,8 @@ firewall:
|
|||||||
# host: `any` or a literal hostname, ie `test-host`
|
# host: `any` or a literal hostname, ie `test-host`
|
||||||
# group: `any` or a literal group name, ie `default-group`
|
# group: `any` or a literal group name, ie `default-group`
|
||||||
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
|
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
|
||||||
# cidr: a CIDR, `0.0.0.0/0` is any.
|
# cidr: a remote CIDR, `0.0.0.0/0` is any.
|
||||||
|
# local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
|
||||||
# ca_name: An issuing CA name
|
# ca_name: An issuing CA name
|
||||||
# ca_sha: An issuing CA shasum
|
# ca_sha: An issuing CA shasum
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=nebula
|
Description=Nebula overlay networking tool
|
||||||
Wants=basic.target
|
Wants=basic.target network-online.target nss-lookup.target time-sync.target
|
||||||
After=basic.target network.target
|
After=basic.target network.target network-online.target
|
||||||
|
Before=sshd.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
SyslogIdentifier=nebula
|
SyslogIdentifier=nebula
|
||||||
|
|||||||
34
examples/service_scripts/nebula.plist
Normal file
34
examples/service_scripts/nebula.plist
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||||
|
<plist version="1.0">
|
||||||
|
<dict>
|
||||||
|
<key>KeepAlive</key>
|
||||||
|
<true/>
|
||||||
|
<key>Label</key>
|
||||||
|
<string>net.defined.nebula</string>
|
||||||
|
<key>WorkingDirectory</key>
|
||||||
|
<string>/Users/{username}/.local/bin/nebula</string>
|
||||||
|
<key>LimitLoadToSessionType</key>
|
||||||
|
<array>
|
||||||
|
<string>Aqua</string>
|
||||||
|
<string>Background</string>
|
||||||
|
<string>LoginWindow</string>
|
||||||
|
<string>StandardIO</string>
|
||||||
|
<string>System</string>
|
||||||
|
</array>
|
||||||
|
<key>ProgramArguments</key>
|
||||||
|
<array>
|
||||||
|
<string>./nebula</string>
|
||||||
|
<string>-config</string>
|
||||||
|
<string>./config.yml</string>
|
||||||
|
</array>
|
||||||
|
<key>RunAtLoad</key>
|
||||||
|
<true/>
|
||||||
|
<key>StandardErrorPath</key>
|
||||||
|
<string>./nebula.log</string>
|
||||||
|
<key>StandardOutPath</key>
|
||||||
|
<string>./nebula.log</string>
|
||||||
|
<key>UserName</key>
|
||||||
|
<string>root</string>
|
||||||
|
</dict>
|
||||||
|
</plist>
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=nebula
|
Description=Nebula overlay networking tool
|
||||||
Wants=basic.target
|
Wants=basic.target network-online.target nss-lookup.target time-sync.target
|
||||||
After=basic.target network.target
|
After=basic.target network.target network-online.target
|
||||||
Before=sshd.service
|
Before=sshd.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
|
|||||||
134
firewall.go
134
firewall.go
@@ -25,7 +25,7 @@ const tcpACK = 0x10
|
|||||||
const tcpFIN = 0x01
|
const tcpFIN = 0x01
|
||||||
|
|
||||||
type FirewallInterface interface {
|
type FirewallInterface interface {
|
||||||
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error
|
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type conn struct {
|
type conn struct {
|
||||||
@@ -47,6 +47,9 @@ type Firewall struct {
|
|||||||
InRules *FirewallTable
|
InRules *FirewallTable
|
||||||
OutRules *FirewallTable
|
OutRules *FirewallTable
|
||||||
|
|
||||||
|
InSendReject bool
|
||||||
|
OutSendReject bool
|
||||||
|
|
||||||
//TODO: we should have many more options for TCP, an option for ICMP, and mimic the kernel a bit better
|
//TODO: we should have many more options for TCP, an option for ICMP, and mimic the kernel a bit better
|
||||||
// https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
|
// https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
|
||||||
TCPTimeout time.Duration //linux: 5 days max
|
TCPTimeout time.Duration //linux: 5 days max
|
||||||
@@ -77,7 +80,7 @@ type FirewallConntrack struct {
|
|||||||
sync.Mutex
|
sync.Mutex
|
||||||
|
|
||||||
Conns map[firewall.Packet]*conn
|
Conns map[firewall.Packet]*conn
|
||||||
TimerWheel *TimerWheel
|
TimerWheel *TimerWheel[firewall.Packet]
|
||||||
}
|
}
|
||||||
|
|
||||||
type FirewallTable struct {
|
type FirewallTable struct {
|
||||||
@@ -103,11 +106,12 @@ type FirewallCA struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type FirewallRule struct {
|
type FirewallRule struct {
|
||||||
// Any makes Hosts, Groups, and CIDR irrelevant
|
// Any makes Hosts, Groups, CIDR and LocalCIDR irrelevant
|
||||||
Any bool
|
Any bool
|
||||||
Hosts map[string]struct{}
|
Hosts map[string]struct{}
|
||||||
Groups [][]string
|
Groups [][]string
|
||||||
CIDR *cidr.Tree4
|
CIDR *cidr.Tree4
|
||||||
|
LocalCIDR *cidr.Tree4
|
||||||
}
|
}
|
||||||
|
|
||||||
// Even though ports are uint16, int32 maps are faster for lookup
|
// Even though ports are uint16, int32 maps are faster for lookup
|
||||||
@@ -145,7 +149,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
|
|||||||
return &Firewall{
|
return &Firewall{
|
||||||
Conntrack: &FirewallConntrack{
|
Conntrack: &FirewallConntrack{
|
||||||
Conns: make(map[firewall.Packet]*conn),
|
Conns: make(map[firewall.Packet]*conn),
|
||||||
TimerWheel: NewTimerWheel(min, max),
|
TimerWheel: NewTimerWheel[firewall.Packet](min, max),
|
||||||
},
|
},
|
||||||
InRules: newFirewallTable(),
|
InRules: newFirewallTable(),
|
||||||
OutRules: newFirewallTable(),
|
OutRules: newFirewallTable(),
|
||||||
@@ -179,6 +183,28 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
|
|||||||
//TODO: max_connections
|
//TODO: max_connections
|
||||||
)
|
)
|
||||||
|
|
||||||
|
inboundAction := c.GetString("firewall.inbound_action", "drop")
|
||||||
|
switch inboundAction {
|
||||||
|
case "reject":
|
||||||
|
fw.InSendReject = true
|
||||||
|
case "drop":
|
||||||
|
fw.InSendReject = false
|
||||||
|
default:
|
||||||
|
l.WithField("action", inboundAction).Warn("invalid firewall.inbound_action, defaulting to `drop`")
|
||||||
|
fw.InSendReject = false
|
||||||
|
}
|
||||||
|
|
||||||
|
outboundAction := c.GetString("firewall.outbound_action", "drop")
|
||||||
|
switch outboundAction {
|
||||||
|
case "reject":
|
||||||
|
fw.OutSendReject = true
|
||||||
|
case "drop":
|
||||||
|
fw.OutSendReject = false
|
||||||
|
default:
|
||||||
|
l.WithField("action", inboundAction).Warn("invalid firewall.outbound_action, defaulting to `drop`")
|
||||||
|
fw.OutSendReject = false
|
||||||
|
}
|
||||||
|
|
||||||
err := AddFirewallRulesFromConfig(l, false, c, fw)
|
err := AddFirewallRulesFromConfig(l, false, c, fw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -193,18 +219,22 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
|
|||||||
}
|
}
|
||||||
|
|
||||||
// AddRule properly creates the in memory rule structure for a firewall table.
|
// AddRule properly creates the in memory rule structure for a firewall table.
|
||||||
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
|
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
|
||||||
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
|
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
|
||||||
// https://github.com/golang/go/issues/14131
|
// https://github.com/golang/go/issues/14131
|
||||||
sIp := ""
|
sIp := ""
|
||||||
if ip != nil {
|
if ip != nil {
|
||||||
sIp = ip.String()
|
sIp = ip.String()
|
||||||
}
|
}
|
||||||
|
lIp := ""
|
||||||
|
if localIp != nil {
|
||||||
|
lIp = localIp.String()
|
||||||
|
}
|
||||||
|
|
||||||
// We need this rule string because we generate a hash. Removing this will break firewall reload.
|
// We need this rule string because we generate a hash. Removing this will break firewall reload.
|
||||||
ruleString := fmt.Sprintf(
|
ruleString := fmt.Sprintf(
|
||||||
"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s",
|
"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, localIp: %v, caName: %v, caSha: %s",
|
||||||
incoming, proto, startPort, endPort, groups, host, sIp, caName, caSha,
|
incoming, proto, startPort, endPort, groups, host, sIp, lIp, caName, caSha,
|
||||||
)
|
)
|
||||||
f.rules += ruleString + "\n"
|
f.rules += ruleString + "\n"
|
||||||
|
|
||||||
@@ -212,7 +242,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
|
|||||||
if !incoming {
|
if !incoming {
|
||||||
direction = "outgoing"
|
direction = "outgoing"
|
||||||
}
|
}
|
||||||
f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "caName": caName, "caSha": caSha}).
|
f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "localIp": lIp, "caName": caName, "caSha": caSha}).
|
||||||
Info("Firewall rule added")
|
Info("Firewall rule added")
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -239,7 +269,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
|
|||||||
return fmt.Errorf("unknown protocol %v", proto)
|
return fmt.Errorf("unknown protocol %v", proto)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fp.addRule(startPort, endPort, groups, host, ip, caName, caSha)
|
return fp.addRule(startPort, endPort, groups, host, ip, localIp, caName, caSha)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRuleHash returns a hash representation of all inbound and outbound rules
|
// GetRuleHash returns a hash representation of all inbound and outbound rules
|
||||||
@@ -277,8 +307,8 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
|||||||
return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i)
|
return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.CAName == "" && r.CASha == "" {
|
if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.LocalCidr == "" && r.CAName == "" && r.CASha == "" {
|
||||||
return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, ca_name, or ca_sha must be provided", table, i)
|
return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided", table, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(r.Groups) > 0 {
|
if len(r.Groups) > 0 {
|
||||||
@@ -330,7 +360,15 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, r.CAName, r.CASha)
|
var localCidr *net.IPNet
|
||||||
|
if r.LocalCidr != "" {
|
||||||
|
_, localCidr, err = net.ParseCIDR(r.LocalCidr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, localCidr, r.CAName, r.CASha)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("%s rule #%v; `%s`", table, i, err)
|
return fmt.Errorf("%s rule #%v; `%s`", table, i, err)
|
||||||
}
|
}
|
||||||
@@ -510,6 +548,7 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
|
|||||||
conntrack := f.Conntrack
|
conntrack := f.Conntrack
|
||||||
conntrack.Lock()
|
conntrack.Lock()
|
||||||
if _, ok := conntrack.Conns[fp]; !ok {
|
if _, ok := conntrack.Conns[fp]; !ok {
|
||||||
|
conntrack.TimerWheel.Advance(time.Now())
|
||||||
conntrack.TimerWheel.Add(fp, timeout)
|
conntrack.TimerWheel.Add(fp, timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -537,6 +576,7 @@ func (f *Firewall) evict(p firewall.Packet) {
|
|||||||
|
|
||||||
// Timeout is in the future, re-add the timer
|
// Timeout is in the future, re-add the timer
|
||||||
if newT > 0 {
|
if newT > 0 {
|
||||||
|
conntrack.TimerWheel.Advance(time.Now())
|
||||||
conntrack.TimerWheel.Add(p, newT)
|
conntrack.TimerWheel.Add(p, newT)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -568,7 +608,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
|
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
|
||||||
if startPort > endPort {
|
if startPort > endPort {
|
||||||
return fmt.Errorf("start port was lower than end port")
|
return fmt.Errorf("start port was lower than end port")
|
||||||
}
|
}
|
||||||
@@ -581,7 +621,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := fp[i].addRule(groups, host, ip, caName, caSha); err != nil {
|
if err := fp[i].addRule(groups, host, ip, localIp, caName, caSha); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -612,12 +652,13 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
|
|||||||
return fp[firewall.PortAny].match(p, c, caPool)
|
return fp[firewall.PortAny].match(p, c, caPool)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error {
|
func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error {
|
||||||
fr := func() *FirewallRule {
|
fr := func() *FirewallRule {
|
||||||
return &FirewallRule{
|
return &FirewallRule{
|
||||||
Hosts: make(map[string]struct{}),
|
Hosts: make(map[string]struct{}),
|
||||||
Groups: make([][]string, 0),
|
Groups: make([][]string, 0),
|
||||||
CIDR: cidr.NewTree4(),
|
CIDR: cidr.NewTree4(),
|
||||||
|
LocalCIDR: cidr.NewTree4(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -626,14 +667,14 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
|
|||||||
fc.Any = fr()
|
fc.Any = fr()
|
||||||
}
|
}
|
||||||
|
|
||||||
return fc.Any.addRule(groups, host, ip)
|
return fc.Any.addRule(groups, host, ip, localIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
if caSha != "" {
|
if caSha != "" {
|
||||||
if _, ok := fc.CAShas[caSha]; !ok {
|
if _, ok := fc.CAShas[caSha]; !ok {
|
||||||
fc.CAShas[caSha] = fr()
|
fc.CAShas[caSha] = fr()
|
||||||
}
|
}
|
||||||
err := fc.CAShas[caSha].addRule(groups, host, ip)
|
err := fc.CAShas[caSha].addRule(groups, host, ip, localIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -643,7 +684,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
|
|||||||
if _, ok := fc.CANames[caName]; !ok {
|
if _, ok := fc.CANames[caName]; !ok {
|
||||||
fc.CANames[caName] = fr()
|
fc.CANames[caName] = fr()
|
||||||
}
|
}
|
||||||
err := fc.CANames[caName].addRule(groups, host, ip)
|
err := fc.CANames[caName].addRule(groups, host, ip, localIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -675,17 +716,18 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
|
|||||||
return fc.CANames[s.Details.Name].match(p, c)
|
return fc.CANames[s.Details.Name].match(p, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) error {
|
func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, localIp *net.IPNet) error {
|
||||||
if fr.Any {
|
if fr.Any {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if fr.isAny(groups, host, ip) {
|
if fr.isAny(groups, host, ip, localIp) {
|
||||||
fr.Any = true
|
fr.Any = true
|
||||||
// If it's any we need to wipe out any pre-existing rules to save on memory
|
// If it's any we need to wipe out any pre-existing rules to save on memory
|
||||||
fr.Groups = make([][]string, 0)
|
fr.Groups = make([][]string, 0)
|
||||||
fr.Hosts = make(map[string]struct{})
|
fr.Hosts = make(map[string]struct{})
|
||||||
fr.CIDR = cidr.NewTree4()
|
fr.CIDR = cidr.NewTree4()
|
||||||
|
fr.LocalCIDR = cidr.NewTree4()
|
||||||
} else {
|
} else {
|
||||||
if len(groups) > 0 {
|
if len(groups) > 0 {
|
||||||
fr.Groups = append(fr.Groups, groups)
|
fr.Groups = append(fr.Groups, groups)
|
||||||
@@ -698,13 +740,17 @@ func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) err
|
|||||||
if ip != nil {
|
if ip != nil {
|
||||||
fr.CIDR.AddCIDR(ip, struct{}{})
|
fr.CIDR.AddCIDR(ip, struct{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if localIp != nil {
|
||||||
|
fr.LocalCIDR.AddCIDR(localIp, struct{}{})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool {
|
func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPNet) bool {
|
||||||
if len(groups) == 0 && host == "" && ip == nil {
|
if len(groups) == 0 && host == "" && ip == nil && localIp == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -722,6 +768,10 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -763,20 +813,25 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if fr.LocalCIDR != nil && fr.LocalCIDR.Contains(p.LocalIP) != nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// No host, group, or cidr matched, bye bye
|
// No host, group, or cidr matched, bye bye
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
type rule struct {
|
type rule struct {
|
||||||
Port string
|
Port string
|
||||||
Code string
|
Code string
|
||||||
Proto string
|
Proto string
|
||||||
Host string
|
Host string
|
||||||
Group string
|
Group string
|
||||||
Groups []string
|
Groups []string
|
||||||
Cidr string
|
Cidr string
|
||||||
CAName string
|
LocalCidr string
|
||||||
CASha string
|
CAName string
|
||||||
|
CASha string
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
||||||
@@ -800,6 +855,7 @@ func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, er
|
|||||||
r.Proto = toString("proto", m)
|
r.Proto = toString("proto", m)
|
||||||
r.Host = toString("host", m)
|
r.Host = toString("host", m)
|
||||||
r.Cidr = toString("cidr", m)
|
r.Cidr = toString("cidr", m)
|
||||||
|
r.LocalCidr = toString("local_cidr", m)
|
||||||
r.CAName = toString("ca_name", m)
|
r.CAName = toString("ca_name", m)
|
||||||
r.CASha = toString("ca_sha", m)
|
r.CASha = toString("ca_sha", m)
|
||||||
|
|
||||||
@@ -879,7 +935,7 @@ func parsePort(s string) (startPort, endPort int32, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: write tests for these
|
// TODO: write tests for these
|
||||||
func setTCPRTTTracking(c *conn, p []byte) {
|
func setTCPRTTTracking(c *conn, p []byte) {
|
||||||
if c.Seq != 0 {
|
if c.Seq != 0 {
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ type ConntrackCache map[Packet]struct{}
|
|||||||
|
|
||||||
type ConntrackCacheTicker struct {
|
type ConntrackCacheTicker struct {
|
||||||
cacheV uint64
|
cacheV uint64
|
||||||
cacheTick uint64
|
cacheTick atomic.Uint64
|
||||||
|
|
||||||
cache ConntrackCache
|
cache ConntrackCache
|
||||||
}
|
}
|
||||||
@@ -35,7 +35,7 @@ func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
|
|||||||
func (c *ConntrackCacheTicker) tick(d time.Duration) {
|
func (c *ConntrackCacheTicker) tick(d time.Duration) {
|
||||||
for {
|
for {
|
||||||
time.Sleep(d)
|
time.Sleep(d)
|
||||||
atomic.AddUint64(&c.cacheTick, 1)
|
c.cacheTick.Add(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
|
|||||||
if c == nil {
|
if c == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if tick := atomic.LoadUint64(&c.cacheTick); tick != c.cacheV {
|
if tick := c.cacheTick.Load(); tick != c.cacheV {
|
||||||
c.cacheV = tick
|
c.cacheV = tick
|
||||||
if ll := len(c.cache); ll > 0 {
|
if ll := len(c.cache); ll > 0 {
|
||||||
if l.Level == logrus.DebugLevel {
|
if l.Level == logrus.DebugLevel {
|
||||||
|
|||||||
207
firewall_test.go
207
firewall_test.go
@@ -34,27 +34,27 @@ func TestNewFirewall(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
|
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
|
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
|
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
|
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
|
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
|
||||||
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_AddRule(t *testing.T) {
|
func TestFirewall_AddRule(t *testing.T) {
|
||||||
@@ -69,67 +69,75 @@ func TestFirewall_AddRule(t *testing.T) {
|
|||||||
|
|
||||||
_, ti, _ := net.ParseCIDR("1.2.3.4/32")
|
_, ti, _ := net.ParseCIDR("1.2.3.4/32")
|
||||||
|
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", ""))
|
||||||
// An empty rule is any
|
// An empty rule is any
|
||||||
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", ""))
|
||||||
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
||||||
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
||||||
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", ""))
|
||||||
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
||||||
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", ""))
|
||||||
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
||||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
|
||||||
|
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||||
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
||||||
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
||||||
|
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||||
|
|
||||||
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
|
||||||
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha"))
|
||||||
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
||||||
|
|
||||||
// Set any and clear fields
|
// Set any and clear fields
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", ""))
|
||||||
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
|
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
|
||||||
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
|
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
|
||||||
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||||
|
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||||
|
|
||||||
// run twice just to make sure
|
// run twice just to make sure
|
||||||
//TODO: these ANY rules should clear the CA firewall portion
|
//TODO: these ANY rules should clear the CA firewall portion
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
|
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
|
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
|
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
|
||||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||||
|
|
||||||
// Test error conditions
|
// Test error conditions
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", ""))
|
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", ""))
|
||||||
assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, "", ""))
|
assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", ""))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop(t *testing.T) {
|
func TestFirewall_Drop(t *testing.T) {
|
||||||
@@ -138,12 +146,12 @@ func TestFirewall_Drop(t *testing.T) {
|
|||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
|
|
||||||
p := firewall.Packet{
|
p := firewall.Packet{
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
10,
|
LocalPort: 10,
|
||||||
90,
|
RemotePort: 90,
|
||||||
firewall.ProtoUDP,
|
Protocol: firewall.ProtoUDP,
|
||||||
false,
|
Fragment: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
ipNet := net.IPNet{
|
ipNet := net.IPNet{
|
||||||
@@ -169,7 +177,7 @@ func TestFirewall_Drop(t *testing.T) {
|
|||||||
h.CreateRemoteCIDR(&c)
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// Drop outbound
|
// Drop outbound
|
||||||
@@ -188,28 +196,28 @@ func TestFirewall_Drop(t *testing.T) {
|
|||||||
|
|
||||||
// ensure signer doesn't get in the way of group checks
|
// ensure signer doesn't get in the way of group checks
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum"))
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad"))
|
||||||
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
|
||||||
// test caSha doesn't drop on match
|
// test caSha doesn't drop on match
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad"))
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum"))
|
||||||
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
|
|
||||||
// ensure ca name doesn't get in the way of group checks
|
// ensure ca name doesn't get in the way of group checks
|
||||||
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", ""))
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", ""))
|
||||||
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
|
||||||
// test caName doesn't drop on match
|
// test caName doesn't drop on match
|
||||||
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", ""))
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", ""))
|
||||||
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -219,11 +227,11 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_, n, _ := net.ParseCIDR("172.1.1.1/32")
|
_, n, _ := net.ParseCIDR("172.1.1.1/32")
|
||||||
_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, "", "")
|
_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "")
|
||||||
_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, "", "")
|
_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "")
|
||||||
_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, "", "")
|
_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "")
|
||||||
_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, "", "")
|
_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "")
|
||||||
_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, "", "")
|
_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "")
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
b.Run("fail on proto", func(b *testing.B) {
|
b.Run("fail on proto", func(b *testing.B) {
|
||||||
@@ -291,7 +299,20 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "")
|
b.Run("pass on local ip", func(b *testing.B) {
|
||||||
|
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
|
c := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
Name: "good-host",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
|
||||||
|
|
||||||
b.Run("pass on ip with any port", func(b *testing.B) {
|
b.Run("pass on ip with any port", func(b *testing.B) {
|
||||||
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
@@ -305,6 +326,19 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
b.Run("pass on local ip with any port", func(b *testing.B) {
|
||||||
|
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
|
c := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
Name: "good-host",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop2(t *testing.T) {
|
func TestFirewall_Drop2(t *testing.T) {
|
||||||
@@ -313,12 +347,12 @@ func TestFirewall_Drop2(t *testing.T) {
|
|||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
|
|
||||||
p := firewall.Packet{
|
p := firewall.Packet{
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
10,
|
LocalPort: 10,
|
||||||
90,
|
RemotePort: 90,
|
||||||
firewall.ProtoUDP,
|
Protocol: firewall.ProtoUDP,
|
||||||
false,
|
Fragment: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
ipNet := net.IPNet{
|
ipNet := net.IPNet{
|
||||||
@@ -356,7 +390,7 @@ func TestFirewall_Drop2(t *testing.T) {
|
|||||||
h1.CreateRemoteCIDR(&c1)
|
h1.CreateRemoteCIDR(&c1)
|
||||||
|
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// h1/c1 lacks the proper groups
|
// h1/c1 lacks the proper groups
|
||||||
@@ -372,12 +406,12 @@ func TestFirewall_Drop3(t *testing.T) {
|
|||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
|
|
||||||
p := firewall.Packet{
|
p := firewall.Packet{
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
1,
|
LocalPort: 1,
|
||||||
1,
|
RemotePort: 1,
|
||||||
firewall.ProtoUDP,
|
Protocol: firewall.ProtoUDP,
|
||||||
false,
|
Fragment: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
ipNet := net.IPNet{
|
ipNet := net.IPNet{
|
||||||
@@ -438,8 +472,8 @@ func TestFirewall_Drop3(t *testing.T) {
|
|||||||
h3.CreateRemoteCIDR(&c3)
|
h3.CreateRemoteCIDR(&c3)
|
||||||
|
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", ""))
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha"))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// c1 should pass because host match
|
// c1 should pass because host match
|
||||||
@@ -458,12 +492,12 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
|
|||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
|
|
||||||
p := firewall.Packet{
|
p := firewall.Packet{
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
10,
|
LocalPort: 10,
|
||||||
90,
|
RemotePort: 90,
|
||||||
firewall.ProtoUDP,
|
Protocol: firewall.ProtoUDP,
|
||||||
false,
|
Fragment: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
ipNet := net.IPNet{
|
ipNet := net.IPNet{
|
||||||
@@ -489,7 +523,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
|
|||||||
h.CreateRemoteCIDR(&c)
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// Drop outbound
|
// Drop outbound
|
||||||
@@ -502,7 +536,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
|
|||||||
|
|
||||||
oldFw := fw
|
oldFw := fw
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", ""))
|
||||||
fw.Conntrack = oldFw.Conntrack
|
fw.Conntrack = oldFw.Conntrack
|
||||||
fw.rulesVersion = oldFw.rulesVersion + 1
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
|
||||||
@@ -511,7 +545,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
|
|||||||
|
|
||||||
oldFw = fw
|
oldFw = fw
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", ""))
|
||||||
fw.Conntrack = oldFw.Conntrack
|
fw.Conntrack = oldFw.Conntrack
|
||||||
fw.rulesVersion = oldFw.rulesVersion + 1
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
|
||||||
@@ -653,7 +687,7 @@ func TestNewFirewallFromConfig(t *testing.T) {
|
|||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
|
||||||
_, err = NewFirewallFromConfig(l, c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided")
|
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided")
|
||||||
|
|
||||||
// Test code/port error
|
// Test code/port error
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
@@ -677,6 +711,12 @@ func TestNewFirewallFromConfig(t *testing.T) {
|
|||||||
_, err = NewFirewallFromConfig(l, c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
|
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
|
||||||
|
|
||||||
|
// Test local_cidr parse error
|
||||||
|
conf = config.NewC(l)
|
||||||
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
|
||||||
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
|
assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh")
|
||||||
|
|
||||||
// Test both group and groups
|
// Test both group and groups
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
|
||||||
@@ -691,63 +731,78 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
|
|||||||
mf := &mockFirewall{}
|
mf := &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding udp rule
|
// Test adding udp rule
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding icmp rule
|
// Test adding icmp rule
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding any rule
|
// Test adding any rule
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
|
// Test adding rule with cidr
|
||||||
|
cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}
|
||||||
|
conf = config.NewC(l)
|
||||||
|
mf = &mockFirewall{}
|
||||||
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
|
||||||
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
|
// Test adding rule with local_cidr
|
||||||
|
conf = config.NewC(l)
|
||||||
|
mf = &mockFirewall{}
|
||||||
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
|
||||||
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with ca_sha
|
// Test adding rule with ca_sha
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with ca_name
|
// Test adding rule with ca_name
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall)
|
||||||
|
|
||||||
// Test single group
|
// Test single group
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test single groups
|
// Test single groups
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test multiple AND groups
|
// Test multiple AND groups
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test Add error
|
// Test Add error
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
@@ -892,6 +947,7 @@ type addRuleCall struct {
|
|||||||
groups []string
|
groups []string
|
||||||
host string
|
host string
|
||||||
ip *net.IPNet
|
ip *net.IPNet
|
||||||
|
localIp *net.IPNet
|
||||||
caName string
|
caName string
|
||||||
caSha string
|
caSha string
|
||||||
}
|
}
|
||||||
@@ -901,7 +957,7 @@ type mockFirewall struct {
|
|||||||
nextCallReturn error
|
nextCallReturn error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
|
func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
|
||||||
mf.lastCall = addRuleCall{
|
mf.lastCall = addRuleCall{
|
||||||
incoming: incoming,
|
incoming: incoming,
|
||||||
proto: proto,
|
proto: proto,
|
||||||
@@ -910,6 +966,7 @@ func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, end
|
|||||||
groups: groups,
|
groups: groups,
|
||||||
host: host,
|
host: host,
|
||||||
ip: ip,
|
ip: ip,
|
||||||
|
localIp: localIp,
|
||||||
caName: caName,
|
caName: caName,
|
||||||
caSha: caSha,
|
caSha: caSha,
|
||||||
}
|
}
|
||||||
|
|||||||
49
go.mod
49
go.mod
@@ -1,6 +1,6 @@
|
|||||||
module github.com/slackhq/nebula
|
module github.com/slackhq/nebula
|
||||||
|
|
||||||
go 1.18
|
go 1.20
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
||||||
@@ -9,40 +9,41 @@ require (
|
|||||||
github.com/flynn/noise v1.0.0
|
github.com/flynn/noise v1.0.0
|
||||||
github.com/gogo/protobuf v1.3.2
|
github.com/gogo/protobuf v1.3.2
|
||||||
github.com/google/gopacket v1.1.19
|
github.com/google/gopacket v1.1.19
|
||||||
github.com/imdario/mergo v0.3.8
|
github.com/imdario/mergo v0.3.15
|
||||||
github.com/kardianos/service v1.2.1
|
github.com/kardianos/service v1.2.2
|
||||||
github.com/miekg/dns v1.1.48
|
github.com/miekg/dns v1.1.54
|
||||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
|
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
|
||||||
github.com/prometheus/client_golang v1.12.1
|
github.com/prometheus/client_golang v1.15.1
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
||||||
github.com/sirupsen/logrus v1.8.1
|
github.com/sirupsen/logrus v1.9.0
|
||||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
|
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
|
||||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
|
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
|
||||||
github.com/stretchr/testify v1.7.1
|
github.com/stretchr/testify v1.8.2
|
||||||
github.com/vishvananda/netlink v1.1.0
|
github.com/vishvananda/netlink v1.1.0
|
||||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29
|
golang.org/x/crypto v0.8.0
|
||||||
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b
|
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
|
||||||
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71
|
golang.org/x/net v0.9.0
|
||||||
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
|
golang.org/x/sys v0.8.0
|
||||||
|
golang.org/x/term v0.8.0
|
||||||
|
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
|
||||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||||
google.golang.org/protobuf v1.28.0
|
google.golang.org/protobuf v1.30.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
github.com/golang/protobuf v1.5.3 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/prometheus/client_model v0.2.0 // indirect
|
github.com/prometheus/client_model v0.4.0 // indirect
|
||||||
github.com/prometheus/common v0.33.0 // indirect
|
github.com/prometheus/common v0.42.0 // indirect
|
||||||
github.com/prometheus/procfs v0.7.3 // indirect
|
github.com/prometheus/procfs v0.9.0 // indirect
|
||||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
|
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
|
github.com/vishvananda/netns v0.0.4 // indirect
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
golang.org/x/mod v0.10.0 // indirect
|
||||||
golang.org/x/tools v0.1.10 // indirect
|
golang.org/x/tools v0.8.0 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
|
||||||
)
|
)
|
||||||
|
|||||||
435
go.sum
435
go.sum
@@ -1,38 +1,4 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
|
||||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
|
||||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
|
||||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
|
||||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
|
||||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
|
||||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
|
||||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
|
||||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
|
||||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
|
||||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
|
||||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
|
||||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
|
||||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
|
||||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
|
||||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
|
||||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
|
||||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
|
||||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
|
||||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
|
||||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
|
||||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
|
||||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
|
||||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
|
||||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
|
||||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
|
||||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
|
||||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
|
||||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
|
||||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
|
||||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
@@ -46,134 +12,78 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
|||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
|
||||||
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
||||||
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
|
||||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
|
||||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
|
|
||||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
|
||||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
|
||||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk=
|
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
|
||||||
github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
|
|
||||||
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
github.com/miekg/dns v1.1.48 h1:Ucfr7IIVyMBz4lRE8qmGUuZ4Wt3/ZGu9hmcMT3Uu4tQ=
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||||
github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||||
|
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
|
||||||
|
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg=
|
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg=
|
||||||
@@ -187,371 +97,146 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
|||||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||||
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
|
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
|
||||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
|
||||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||||
|
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||||
github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE=
|
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||||
github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
|
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
|
||||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
||||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
|
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
|
||||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
|
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||||
|
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
|
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
|
||||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
|
||||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
|
||||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
||||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o=
|
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
|
||||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
|
||||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
|
||||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
|
||||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
|
||||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
|
||||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
|
||||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
|
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
|
||||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
||||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
|
||||||
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0=
|
|
||||||
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 h1:PRD0hj6tTuUnCFD08vkvjkYFbQg/9lV8KIxe1y4/cvU=
|
|
||||||
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|
||||||
golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
|
||||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
|
||||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
|
||||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
|
||||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
|
||||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||||
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||||
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
|
|
||||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY=
|
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||||
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
|
||||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
|
||||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
|
||||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
|
||||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
|
||||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
|
||||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
|
||||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
|
||||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
|
||||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
|
||||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
|
||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
@@ -560,15 +245,5 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H, hostinfo *HostInfo) {
|
func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H, hostinfo *HostInfo) {
|
||||||
// First remote allow list check before we know the vpnIp
|
// First remote allow list check before we know the vpnIp
|
||||||
if addr != nil {
|
if addr != nil {
|
||||||
if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
|
if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
@@ -51,7 +50,7 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
|
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
|
||||||
atomic.AddUint64(&ci.atomicMessageCounter, 1)
|
ci.messageCounter.Add(1)
|
||||||
|
|
||||||
msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
|
msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -69,7 +68,7 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
|
|||||||
hostinfo.handshakeStart = time.Now()
|
hostinfo.handshakeStart = time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H) {
|
func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
|
||||||
ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
|
ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
|
||||||
// Mark packet 1 as seen so it doesn't show up as missed
|
// Mark packet 1 as seen so it doesn't show up as missed
|
||||||
ci.window.Update(f.l, 1)
|
ci.window.Update(f.l, 1)
|
||||||
@@ -208,9 +207,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
|
|||||||
hostinfo.SetRemote(addr)
|
hostinfo.SetRemote(addr)
|
||||||
hostinfo.CreateRemoteCIDR(remoteCert)
|
hostinfo.CreateRemoteCIDR(remoteCert)
|
||||||
|
|
||||||
// Only overwrite existing record if we should win the handshake race
|
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
|
||||||
overwrite := vpnIp > f.myVpnIp
|
|
||||||
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
switch err {
|
switch err {
|
||||||
case ErrAlreadySeen:
|
case ErrAlreadySeen:
|
||||||
@@ -243,14 +240,13 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
via2 := via.(*ViaSender)
|
if via == nil {
|
||||||
if via2 == nil {
|
|
||||||
f.l.Error("Handshake send failed: both addr and via are nil.")
|
f.l.Error("Handshake send failed: both addr and via are nil.")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
|
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
|
||||||
f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
||||||
f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via2.relayHI.vpnIp).
|
f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via.relayHI.vpnIp).
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
|
||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
return
|
return
|
||||||
@@ -281,16 +277,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
|
|||||||
WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
|
WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
|
||||||
Error("Failed to add HostInfo due to localIndex collision")
|
Error("Failed to add HostInfo due to localIndex collision")
|
||||||
return
|
return
|
||||||
case ErrExistingHandshake:
|
|
||||||
// We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish
|
|
||||||
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
|
||||||
WithField("certName", certName).
|
|
||||||
WithField("fingerprint", fingerprint).
|
|
||||||
WithField("issuer", issuer).
|
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
|
||||||
Error("Prevented a pending handshake race")
|
|
||||||
return
|
|
||||||
default:
|
default:
|
||||||
// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
|
// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
|
||||||
// And we forget to update it here
|
// And we forget to update it here
|
||||||
@@ -328,14 +314,13 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
|
|||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
via2 := via.(*ViaSender)
|
if via == nil {
|
||||||
if via2 == nil {
|
|
||||||
f.l.Error("Handshake send failed: both addr and via are nil.")
|
f.l.Error("Handshake send failed: both addr and via are nil.")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
|
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
|
||||||
f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
||||||
f.l.WithField("vpnIp", vpnIp).WithField("relay", via2.relayHI.vpnIp).
|
f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
WithField("fingerprint", fingerprint).
|
WithField("fingerprint", fingerprint).
|
||||||
WithField("issuer", issuer).
|
WithField("issuer", issuer).
|
||||||
@@ -345,12 +330,13 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
|
|||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
|
||||||
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
|
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *HostInfo, packet []byte, h *header.H) bool {
|
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *HostInfo, packet []byte, h *header.H) bool {
|
||||||
if hostinfo == nil {
|
if hostinfo == nil {
|
||||||
// Nothing here to tear down, got a bogus stage 2 packet
|
// Nothing here to tear down, got a bogus stage 2 packet
|
||||||
return true
|
return true
|
||||||
@@ -494,16 +480,15 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
|
|||||||
if addr != nil {
|
if addr != nil {
|
||||||
hostinfo.SetRemote(addr)
|
hostinfo.SetRemote(addr)
|
||||||
} else {
|
} else {
|
||||||
via2 := via.(*ViaSender)
|
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
|
||||||
hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build up the radix for the firewall if we have subnets in the cert
|
// Build up the radix for the firewall if we have subnets in the cert
|
||||||
hostinfo.CreateRemoteCIDR(remoteCert)
|
hostinfo.CreateRemoteCIDR(remoteCert)
|
||||||
|
|
||||||
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
|
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
|
||||||
//TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok?
|
|
||||||
f.handshakeManager.Complete(hostinfo, f)
|
f.handshakeManager.Complete(hostinfo, f)
|
||||||
|
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
|
||||||
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
|
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
|
||||||
f.metricHandshakes.Update(duration)
|
f.metricHandshakes.Update(duration)
|
||||||
|
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ type HandshakeManager struct {
|
|||||||
lightHouse *LightHouse
|
lightHouse *LightHouse
|
||||||
outside *udp.Conn
|
outside *udp.Conn
|
||||||
config HandshakeConfig
|
config HandshakeConfig
|
||||||
OutboundHandshakeTimer *SystemTimerWheel
|
OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
|
||||||
messageMetrics *MessageMetrics
|
messageMetrics *MessageMetrics
|
||||||
metricInitiated metrics.Counter
|
metricInitiated metrics.Counter
|
||||||
metricTimedOut metrics.Counter
|
metricTimedOut metrics.Counter
|
||||||
@@ -65,7 +65,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
|
|||||||
outside: outside,
|
outside: outside,
|
||||||
config: config,
|
config: config,
|
||||||
trigger: make(chan iputil.VpnIp, config.triggerBuffer),
|
trigger: make(chan iputil.VpnIp, config.triggerBuffer),
|
||||||
OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
|
OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
|
||||||
messageMetrics: config.messageMetrics,
|
messageMetrics: config.messageMetrics,
|
||||||
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
|
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
|
||||||
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
|
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
|
||||||
@@ -73,7 +73,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
|
func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
|
||||||
clockSource := time.NewTicker(c.config.tryInterval)
|
clockSource := time.NewTicker(c.config.tryInterval)
|
||||||
defer clockSource.Stop()
|
defer clockSource.Stop()
|
||||||
|
|
||||||
@@ -89,19 +89,18 @@ func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) {
|
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
|
||||||
c.OutboundHandshakeTimer.advance(now)
|
c.OutboundHandshakeTimer.Advance(now)
|
||||||
for {
|
for {
|
||||||
ep := c.OutboundHandshakeTimer.Purge()
|
vpnIp, has := c.OutboundHandshakeTimer.Purge()
|
||||||
if ep == nil {
|
if !has {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
vpnIp := ep.(iputil.VpnIp)
|
|
||||||
c.handleOutbound(vpnIp, f, false)
|
c.handleOutbound(vpnIp, f, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) {
|
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) {
|
||||||
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
|
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@@ -137,31 +136,37 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// We only care about a lighthouse trigger before the first handshake transmit attempt. This is a very specific
|
|
||||||
// optimization for a fast lighthouse reply
|
|
||||||
//TODO: it would feel better to do this once, anytime, as our delay increases over time
|
|
||||||
if lighthouseTriggered && hostinfo.HandshakeCounter > 0 {
|
|
||||||
// If we didn't return here a lighthouse could cause us to aggressively send handshakes
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get a remotes object if we don't already have one.
|
// Get a remotes object if we don't already have one.
|
||||||
// This is mainly to protect us as this should never be the case
|
// This is mainly to protect us as this should never be the case
|
||||||
// NB ^ This comment doesn't jive. It's how the thing gets intiailized.
|
// NB ^ This comment doesn't jive. It's how the thing gets initialized.
|
||||||
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
|
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
|
||||||
if hostinfo.remotes == nil {
|
if hostinfo.remotes == nil {
|
||||||
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
|
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: this will generate a load of queries for hosts with only 1 ip (i'm not using a lighthouse, static mapped)
|
remotes := hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)
|
||||||
if hostinfo.remotes.Len(c.pendingHostMap.preferredRanges) <= 1 {
|
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hostinfo.HandshakeLastRemotes)
|
||||||
|
|
||||||
|
// We only care about a lighthouse trigger if we have new remotes to send to.
|
||||||
|
// This is a very specific optimization for a fast lighthouse reply.
|
||||||
|
if lighthouseTriggered && !remotesHaveChanged {
|
||||||
|
// If we didn't return here a lighthouse could cause us to aggressively send handshakes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hostinfo.HandshakeLastRemotes = remotes
|
||||||
|
|
||||||
|
// TODO: this will generate a load of queries for hosts with only 1 ip
|
||||||
|
// (such as ones registered to the lighthouse with only a private IP)
|
||||||
|
// So we only do it one time after attempting 5 handshakes already.
|
||||||
|
if len(remotes) <= 1 && hostinfo.HandshakeCounter == 5 {
|
||||||
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
|
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
|
||||||
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
|
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
|
||||||
// the learned public ip for them. Query again to short circuit the promotion counter
|
// the learned public ip for them. Query again to short circuit the promotion counter
|
||||||
c.lightHouse.QueryServer(vpnIp, f)
|
c.lightHouse.QueryServer(vpnIp, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
|
// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
|
||||||
var sentTo []*udp.Addr
|
var sentTo []*udp.Addr
|
||||||
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
|
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
|
||||||
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
|
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
|
||||||
@@ -177,16 +182,22 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout
|
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
|
||||||
if len(sentTo) > 0 {
|
// so only log when the list of remotes has changed
|
||||||
|
if remotesHaveChanged {
|
||||||
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
|
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
|
||||||
WithField("initiatorIndex", hostinfo.localIndexId).
|
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
|
} else if c.l.IsLevelEnabled(logrus.DebugLevel) {
|
||||||
|
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
|
||||||
|
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||||
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
|
Debug("Handshake message sent")
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
|
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
|
||||||
hostinfo.logger(c.l).WithField("relayIps", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
|
hostinfo.logger(c.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
|
||||||
// Send a RelayRequest to all known Relay IP's
|
// Send a RelayRequest to all known Relay IP's
|
||||||
for _, relay := range hostinfo.remotes.relays {
|
for _, relay := range hostinfo.remotes.relays {
|
||||||
// Don't relay to myself, and don't relay through the host I'm trying to connect to
|
// Don't relay to myself, and don't relay through the host I'm trying to connect to
|
||||||
@@ -195,7 +206,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
|
|||||||
}
|
}
|
||||||
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
|
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
|
||||||
if err != nil || relayHostInfo.remote == nil {
|
if err != nil || relayHostInfo.remote == nil {
|
||||||
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target.")
|
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
|
||||||
f.Handshake(*relay)
|
f.Handshake(*relay)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -220,13 +231,20 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
|
|||||||
WithError(err).
|
WithError(err).
|
||||||
Error("Failed to marshal Control message to create relay")
|
Error("Failed to marshal Control message to create relay")
|
||||||
} else {
|
} else {
|
||||||
f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu))
|
// This must send over the hostinfo, not over hm.Hosts[ip]
|
||||||
|
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||||
|
c.l.WithFields(logrus.Fields{
|
||||||
|
"relayFrom": c.lightHouse.myVpnIp,
|
||||||
|
"relayTo": vpnIp,
|
||||||
|
"initiatorRelayIndex": existingRelay.LocalIndex,
|
||||||
|
"relay": *relay}).
|
||||||
|
Info("send CreateRelayRequest")
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
hostinfo.logger(c.l).
|
hostinfo.logger(c.l).
|
||||||
WithField("vpnIp", vpnIp).
|
WithField("vpnIp", vpnIp).
|
||||||
WithField("state", existingRelay.State).
|
WithField("state", existingRelay.State).
|
||||||
WithField("relayVpnIp", relayHostInfo.vpnIp).
|
WithField("relay", relayHostInfo.vpnIp).
|
||||||
Errorf("Relay unexpected state")
|
Errorf("Relay unexpected state")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -249,7 +267,13 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
|
|||||||
WithError(err).
|
WithError(err).
|
||||||
Error("Failed to marshal Control message to create relay")
|
Error("Failed to marshal Control message to create relay")
|
||||||
} else {
|
} else {
|
||||||
f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu))
|
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||||
|
c.l.WithFields(logrus.Fields{
|
||||||
|
"relayFrom": c.lightHouse.myVpnIp,
|
||||||
|
"relayTo": vpnIp,
|
||||||
|
"initiatorRelayIndex": idx,
|
||||||
|
"relay": *relay}).
|
||||||
|
Info("send CreateRelayRequest")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -261,7 +285,6 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
|
|||||||
|
|
||||||
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
|
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
|
||||||
if !lighthouseTriggered {
|
if !lighthouseTriggered {
|
||||||
//TODO: feel like we dupe handshake real fast in a tight loop, why?
|
|
||||||
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
|
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -281,7 +304,6 @@ var (
|
|||||||
ErrExistingHostInfo = errors.New("existing hostinfo")
|
ErrExistingHostInfo = errors.New("existing hostinfo")
|
||||||
ErrAlreadySeen = errors.New("already seen")
|
ErrAlreadySeen = errors.New("already seen")
|
||||||
ErrLocalIndexCollision = errors.New("local index collision")
|
ErrLocalIndexCollision = errors.New("local index collision")
|
||||||
ErrExistingHandshake = errors.New("existing handshake")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// CheckAndComplete checks for any conflicts in the main and pending hostmap
|
// CheckAndComplete checks for any conflicts in the main and pending hostmap
|
||||||
@@ -295,7 +317,7 @@ var (
|
|||||||
//
|
//
|
||||||
// ErrLocalIndexCollision if we already have an entry in the main or pending
|
// ErrLocalIndexCollision if we already have an entry in the main or pending
|
||||||
// hostmap for the hostinfo.localIndexId.
|
// hostmap for the hostinfo.localIndexId.
|
||||||
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) {
|
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
|
||||||
c.pendingHostMap.Lock()
|
c.pendingHostMap.Lock()
|
||||||
defer c.pendingHostMap.Unlock()
|
defer c.pendingHostMap.Unlock()
|
||||||
c.mainHostMap.Lock()
|
c.mainHostMap.Lock()
|
||||||
@@ -304,13 +326,18 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
|
|||||||
// Check if we already have a tunnel with this vpn ip
|
// Check if we already have a tunnel with this vpn ip
|
||||||
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
|
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
|
||||||
if found && existingHostInfo != nil {
|
if found && existingHostInfo != nil {
|
||||||
// Is it just a delayed handshake packet?
|
testHostInfo := existingHostInfo
|
||||||
if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
|
for testHostInfo != nil {
|
||||||
return existingHostInfo, ErrAlreadySeen
|
// Is it just a delayed handshake packet?
|
||||||
|
if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], testHostInfo.HandshakePacket[handshakePacket]) {
|
||||||
|
return testHostInfo, ErrAlreadySeen
|
||||||
|
}
|
||||||
|
|
||||||
|
testHostInfo = testHostInfo.next
|
||||||
}
|
}
|
||||||
|
|
||||||
// Is this a newer handshake?
|
// Is this a newer handshake?
|
||||||
if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime {
|
if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime && !existingHostInfo.ConnectionState.initiator {
|
||||||
return existingHostInfo, ErrExistingHostInfo
|
return existingHostInfo, ErrExistingHostInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -338,56 +365,19 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
|
|||||||
Info("New host shadows existing host remoteIndex")
|
Info("New host shadows existing host remoteIndex")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we are also handshaking with this vpn ip
|
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
|
||||||
pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
|
|
||||||
if found && pendingHostInfo != nil {
|
|
||||||
if !overwrite {
|
|
||||||
// We won, let our pending handshake win
|
|
||||||
return pendingHostInfo, ErrExistingHandshake
|
|
||||||
}
|
|
||||||
|
|
||||||
// We lost, take this handshake and move any cached packets over so they get sent
|
|
||||||
pendingHostInfo.ConnectionState.queueLock.Lock()
|
|
||||||
hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
|
|
||||||
c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
|
|
||||||
pendingHostInfo.ConnectionState.queueLock.Unlock()
|
|
||||||
pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
|
|
||||||
}
|
|
||||||
|
|
||||||
if existingHostInfo != nil {
|
|
||||||
// We are going to overwrite this entry, so remove the old references
|
|
||||||
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
|
|
||||||
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
|
|
||||||
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
|
|
||||||
for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
|
|
||||||
delete(c.mainHostMap.Relays, relayIdx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.mainHostMap.addHostInfo(hostinfo, f)
|
|
||||||
return existingHostInfo, nil
|
return existingHostInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Complete is a simpler version of CheckAndComplete when we already know we
|
// Complete is a simpler version of CheckAndComplete when we already know we
|
||||||
// won't have a localIndexId collision because we already have an entry in the
|
// won't have a localIndexId collision because we already have an entry in the
|
||||||
// pendingHostMap
|
// pendingHostMap. An existing hostinfo is returned if there was one.
|
||||||
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
|
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
|
||||||
c.pendingHostMap.Lock()
|
c.pendingHostMap.Lock()
|
||||||
defer c.pendingHostMap.Unlock()
|
defer c.pendingHostMap.Unlock()
|
||||||
c.mainHostMap.Lock()
|
c.mainHostMap.Lock()
|
||||||
defer c.mainHostMap.Unlock()
|
defer c.mainHostMap.Unlock()
|
||||||
|
|
||||||
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
|
|
||||||
if found && existingHostInfo != nil {
|
|
||||||
// We are going to overwrite this entry, so remove the old references
|
|
||||||
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
|
|
||||||
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
|
|
||||||
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
|
|
||||||
for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
|
|
||||||
delete(c.mainHostMap.Relays, relayIdx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
|
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
|
||||||
if found && existingRemoteIndex != nil {
|
if found && existingRemoteIndex != nil {
|
||||||
// We have a collision, but this can happen since we can't control
|
// We have a collision, but this can happen since we can't control
|
||||||
@@ -397,8 +387,9 @@ func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
|
|||||||
Info("New host shadows existing host remoteIndex")
|
Info("New host shadows existing host remoteIndex")
|
||||||
}
|
}
|
||||||
|
|
||||||
c.mainHostMap.addHostInfo(hostinfo, f)
|
// We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap.
|
||||||
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
|
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
|
||||||
|
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddIndexHostInfo generates a unique localIndexId for this HostInfo
|
// AddIndexHostInfo generates a unique localIndexId for this HostInfo
|
||||||
|
|||||||
@@ -21,11 +21,7 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
|
|||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
mw := &mockEncWriter{}
|
mw := &mockEncWriter{}
|
||||||
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
|
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||||
lh := &LightHouse{
|
lh := newTestLighthouse()
|
||||||
atomicStaticList: make(map[iputil.VpnIp]struct{}),
|
|
||||||
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
|
|
||||||
addrMap: make(map[iputil.VpnIp]*RemoteList),
|
|
||||||
}
|
|
||||||
|
|
||||||
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
|
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
|
||||||
|
|
||||||
@@ -45,7 +41,7 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
|
|||||||
assert.False(t, initCalled)
|
assert.False(t, initCalled)
|
||||||
assert.Same(t, i, i2)
|
assert.Same(t, i, i2)
|
||||||
|
|
||||||
i.remotes = NewRemoteList()
|
i.remotes = NewRemoteList(nil)
|
||||||
i.HandshakeReady = true
|
i.HandshakeReady = true
|
||||||
|
|
||||||
// Adding something to pending should not affect the main hostmap
|
// Adding something to pending should not affect the main hostmap
|
||||||
@@ -70,53 +66,8 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
|
|||||||
assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
|
assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_NewHandshakeManagerTrigger(t *testing.T) {
|
func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
|
||||||
l := test.NewLogger()
|
for _, i := range tw.t.wheel {
|
||||||
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
|
||||||
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
|
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
|
||||||
mw := &mockEncWriter{}
|
|
||||||
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
|
|
||||||
lh := &LightHouse{
|
|
||||||
addrMap: make(map[iputil.VpnIp]*RemoteList),
|
|
||||||
l: l,
|
|
||||||
atomicStaticList: make(map[iputil.VpnIp]struct{}),
|
|
||||||
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
blah.NextOutboundHandshakeTimerTick(now, mw)
|
|
||||||
|
|
||||||
assert.Equal(t, 0, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
|
||||||
|
|
||||||
hi := blah.AddVpnIp(ip, nil)
|
|
||||||
hi.HandshakeReady = true
|
|
||||||
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
|
||||||
assert.Equal(t, 0, hi.HandshakeCounter, "Should not have attempted a handshake yet")
|
|
||||||
|
|
||||||
// Trigger the same method the channel will but, this should set our remotes pointer
|
|
||||||
blah.handleOutbound(ip, mw, true)
|
|
||||||
assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have done a handshake attempt")
|
|
||||||
assert.NotNil(t, hi.remotes, "Manager should have set my remotes pointer")
|
|
||||||
|
|
||||||
// Make sure the trigger doesn't double schedule the timer entry
|
|
||||||
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
|
||||||
|
|
||||||
uaddr := udp.NewAddrFromString("10.1.1.1:4242")
|
|
||||||
hi.remotes.unlockedPrependV4(ip, NewIp4AndPort(uaddr.IP, uint32(uaddr.Port)))
|
|
||||||
|
|
||||||
// We now have remotes but only the first trigger should have pushed things forward
|
|
||||||
blah.handleOutbound(ip, mw, true)
|
|
||||||
assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have not done a handshake attempt")
|
|
||||||
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) {
|
|
||||||
for _, i := range tw.wheel {
|
|
||||||
n := i.Head
|
n := i.Head
|
||||||
for n != nil {
|
for n != nil {
|
||||||
c++
|
c++
|
||||||
@@ -133,7 +84,11 @@ func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.Mess
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mw *mockEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) {
|
func (mw *mockEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
390
hostmap.go
390
hostmap.go
@@ -1,7 +1,6 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
@@ -18,17 +17,22 @@ import (
|
|||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
//const ProbeLen = 100
|
// const ProbeLen = 100
|
||||||
const PromoteEvery = 1000
|
const PromoteEvery = 1000
|
||||||
const ReQueryEvery = 5000
|
const ReQueryEvery = 5000
|
||||||
const MaxRemotes = 10
|
const MaxRemotes = 10
|
||||||
|
|
||||||
|
// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
|
||||||
|
// 5 allows for an initial handshake and each host pair re-handshaking twice
|
||||||
|
const MaxHostInfosPerVpnIp = 5
|
||||||
|
|
||||||
// How long we should prevent roaming back to the previous IP.
|
// How long we should prevent roaming back to the previous IP.
|
||||||
// This helps prevent flapping due to packets already in flight
|
// This helps prevent flapping due to packets already in flight
|
||||||
const RoamingSuppressSeconds = 2
|
const RoamingSuppressSeconds = 2
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Requested = iota
|
Requested = iota
|
||||||
|
PeerRequested
|
||||||
Established
|
Established
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -59,6 +63,9 @@ type HostMap struct {
|
|||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For synchronization, treat the pointed-to Relay struct as immutable. To edit the Relay
|
||||||
|
// struct, make a copy of an existing value, edit the fileds in the copy, and
|
||||||
|
// then store a pointer to the new copy in both realyForBy* maps.
|
||||||
type RelayState struct {
|
type RelayState struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
|
|
||||||
@@ -73,6 +80,16 @@ func (rs *RelayState) DeleteRelay(ip iputil.VpnIp) {
|
|||||||
delete(rs.relays, ip)
|
delete(rs.relays, ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (rs *RelayState) CopyAllRelayFor() []*Relay {
|
||||||
|
rs.RLock()
|
||||||
|
defer rs.RUnlock()
|
||||||
|
ret := make([]*Relay, 0, len(rs.relayForByIdx))
|
||||||
|
for _, r := range rs.relayForByIdx {
|
||||||
|
ret = append(ret, r)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) {
|
func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) {
|
||||||
rs.RLock()
|
rs.RLock()
|
||||||
defer rs.RUnlock()
|
defer rs.RUnlock()
|
||||||
@@ -119,13 +136,43 @@ func (rs *RelayState) CopyRelayForIdxs() []uint32 {
|
|||||||
func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) {
|
func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) {
|
||||||
rs.Lock()
|
rs.Lock()
|
||||||
defer rs.Unlock()
|
defer rs.Unlock()
|
||||||
relay, ok := rs.relayForByIdx[localIdx]
|
r, ok := rs.relayForByIdx[localIdx]
|
||||||
if !ok {
|
if !ok {
|
||||||
return iputil.VpnIp(0), false
|
return iputil.VpnIp(0), false
|
||||||
}
|
}
|
||||||
delete(rs.relayForByIdx, localIdx)
|
delete(rs.relayForByIdx, localIdx)
|
||||||
delete(rs.relayForByIp, relay.PeerIp)
|
delete(rs.relayForByIp, r.PeerIp)
|
||||||
return relay.PeerIp, true
|
return r.PeerIp, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rs *RelayState) CompleteRelayByIP(vpnIp iputil.VpnIp, remoteIdx uint32) bool {
|
||||||
|
rs.Lock()
|
||||||
|
defer rs.Unlock()
|
||||||
|
r, ok := rs.relayForByIp[vpnIp]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
newRelay := *r
|
||||||
|
newRelay.State = Established
|
||||||
|
newRelay.RemoteIndex = remoteIdx
|
||||||
|
rs.relayForByIdx[r.LocalIndex] = &newRelay
|
||||||
|
rs.relayForByIp[r.PeerIp] = &newRelay
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Relay, bool) {
|
||||||
|
rs.Lock()
|
||||||
|
defer rs.Unlock()
|
||||||
|
r, ok := rs.relayForByIdx[localIdx]
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
newRelay := *r
|
||||||
|
newRelay.State = Established
|
||||||
|
newRelay.RemoteIndex = remoteIdx
|
||||||
|
rs.relayForByIdx[r.LocalIndex] = &newRelay
|
||||||
|
rs.relayForByIp[r.PeerIp] = &newRelay
|
||||||
|
return &newRelay, true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) {
|
func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) {
|
||||||
@@ -141,6 +188,7 @@ func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
|
|||||||
r, ok := rs.relayForByIdx[idx]
|
r, ok := rs.relayForByIdx[idx]
|
||||||
return r, ok
|
return r, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
|
func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
|
||||||
rs.Lock()
|
rs.Lock()
|
||||||
defer rs.Unlock()
|
defer rs.Unlock()
|
||||||
@@ -151,22 +199,23 @@ func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
|
|||||||
type HostInfo struct {
|
type HostInfo struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
|
|
||||||
remote *udp.Addr
|
remote *udp.Addr
|
||||||
remotes *RemoteList
|
remotes *RemoteList
|
||||||
promoteCounter uint32
|
promoteCounter atomic.Uint32
|
||||||
ConnectionState *ConnectionState
|
ConnectionState *ConnectionState
|
||||||
handshakeStart time.Time //todo: this an entry in the handshake manager
|
handshakeStart time.Time //todo: this an entry in the handshake manager
|
||||||
HandshakeReady bool //todo: being in the manager means you are ready
|
HandshakeReady bool //todo: being in the manager means you are ready
|
||||||
HandshakeCounter int //todo: another handshake manager entry
|
HandshakeCounter int //todo: another handshake manager entry
|
||||||
HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready
|
HandshakeLastRemotes []*udp.Addr //todo: another handshake manager entry, which remotes we sent to last time
|
||||||
HandshakePacket map[uint8][]byte //todo: this is other handshake manager entry
|
HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready
|
||||||
packetStore []*cachedPacket //todo: this is other handshake manager entry
|
HandshakePacket map[uint8][]byte //todo: this is other handshake manager entry
|
||||||
remoteIndexId uint32
|
packetStore []*cachedPacket //todo: this is other handshake manager entry
|
||||||
localIndexId uint32
|
remoteIndexId uint32
|
||||||
vpnIp iputil.VpnIp
|
localIndexId uint32
|
||||||
recvError int
|
vpnIp iputil.VpnIp
|
||||||
remoteCidr *cidr.Tree4
|
recvError int
|
||||||
relayState RelayState
|
remoteCidr *cidr.Tree4
|
||||||
|
relayState RelayState
|
||||||
|
|
||||||
// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
|
// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
|
||||||
// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
|
// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
|
||||||
@@ -180,6 +229,10 @@ type HostInfo struct {
|
|||||||
|
|
||||||
lastRoam time.Time
|
lastRoam time.Time
|
||||||
lastRoamRemote *udp.Addr
|
lastRoamRemote *udp.Addr
|
||||||
|
|
||||||
|
// Used to track other hostinfos for this vpn ip since only 1 can be primary
|
||||||
|
// Synchronised via hostmap lock and not the hostinfo lock.
|
||||||
|
next, prev *HostInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
type ViaSender struct {
|
type ViaSender struct {
|
||||||
@@ -237,29 +290,13 @@ func (hm *HostMap) EmitStats(name string) {
|
|||||||
|
|
||||||
func (hm *HostMap) RemoveRelay(localIdx uint32) {
|
func (hm *HostMap) RemoveRelay(localIdx uint32) {
|
||||||
hm.Lock()
|
hm.Lock()
|
||||||
hiRelay, ok := hm.Relays[localIdx]
|
_, ok := hm.Relays[localIdx]
|
||||||
if !ok {
|
if !ok {
|
||||||
hm.Unlock()
|
hm.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
delete(hm.Relays, localIdx)
|
delete(hm.Relays, localIdx)
|
||||||
hm.Unlock()
|
hm.Unlock()
|
||||||
ip, ok := hiRelay.relayState.RemoveRelay(localIdx)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hiPeer, err := hm.QueryVpnIp(ip)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var otherPeerIdx uint32
|
|
||||||
hiPeer.relayState.DeleteRelay(hiRelay.vpnIp)
|
|
||||||
relay, ok := hiPeer.relayState.GetRelayForByIp(hiRelay.vpnIp)
|
|
||||||
if ok {
|
|
||||||
otherPeerIdx = relay.LocalIndex
|
|
||||||
}
|
|
||||||
// I am a relaying host. I need to remove the other relay, too.
|
|
||||||
hm.RemoveRelay(otherPeerIdx)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
|
func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
|
||||||
@@ -284,7 +321,6 @@ func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (
|
|||||||
if h, ok := hm.Hosts[vpnIp]; !ok {
|
if h, ok := hm.Hosts[vpnIp]; !ok {
|
||||||
hm.RUnlock()
|
hm.RUnlock()
|
||||||
h = &HostInfo{
|
h = &HostInfo{
|
||||||
promoteCounter: 0,
|
|
||||||
vpnIp: vpnIp,
|
vpnIp: vpnIp,
|
||||||
HandshakePacket: make(map[uint8][]byte, 0),
|
HandshakePacket: make(map[uint8][]byte, 0),
|
||||||
relayState: RelayState{
|
relayState: RelayState{
|
||||||
@@ -306,20 +342,6 @@ func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) DeleteVpnIp(vpnIp iputil.VpnIp) {
|
|
||||||
hm.Lock()
|
|
||||||
delete(hm.Hosts, vpnIp)
|
|
||||||
if len(hm.Hosts) == 0 {
|
|
||||||
hm.Hosts = map[iputil.VpnIp]*HostInfo{}
|
|
||||||
}
|
|
||||||
hm.Unlock()
|
|
||||||
|
|
||||||
if hm.l.Level >= logrus.DebugLevel {
|
|
||||||
hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": vpnIp, "mapTotalSize": len(hm.Hosts)}).
|
|
||||||
Debug("Hostmap vpnIp deleted")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only used by pendingHostMap when the remote index is not initially known
|
// Only used by pendingHostMap when the remote index is not initially known
|
||||||
func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
|
func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
|
||||||
hm.Lock()
|
hm.Lock()
|
||||||
@@ -334,45 +356,8 @@ func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) AddVpnIpHostInfo(vpnIp iputil.VpnIp, h *HostInfo) {
|
// DeleteReverseIndex is used to clean up on recv_error
|
||||||
hm.Lock()
|
// This function should only ever be called on the pending hostmap
|
||||||
h.vpnIp = vpnIp
|
|
||||||
hm.Hosts[vpnIp] = h
|
|
||||||
hm.Indexes[h.localIndexId] = h
|
|
||||||
hm.RemoteIndexes[h.remoteIndexId] = h
|
|
||||||
hm.Unlock()
|
|
||||||
|
|
||||||
if hm.l.Level > logrus.DebugLevel {
|
|
||||||
hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": vpnIp, "mapTotalSize": len(hm.Hosts),
|
|
||||||
"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "vpnIp": h.vpnIp}}).
|
|
||||||
Debug("Hostmap vpnIp added")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is only called in pendingHostmap, to cleanup an inbound handshake
|
|
||||||
func (hm *HostMap) DeleteIndex(index uint32) {
|
|
||||||
hm.Lock()
|
|
||||||
hostinfo, ok := hm.Indexes[index]
|
|
||||||
if ok {
|
|
||||||
delete(hm.Indexes, index)
|
|
||||||
delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
|
|
||||||
|
|
||||||
// Check if we have an entry under hostId that matches the same hostinfo
|
|
||||||
// instance. Clean it up as well if we do.
|
|
||||||
hostinfo2, ok := hm.Hosts[hostinfo.vpnIp]
|
|
||||||
if ok && hostinfo2 == hostinfo {
|
|
||||||
delete(hm.Hosts, hostinfo.vpnIp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hm.Unlock()
|
|
||||||
|
|
||||||
if hm.l.Level >= logrus.DebugLevel {
|
|
||||||
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
|
|
||||||
Debug("Hostmap index deleted")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is used to cleanup on recv_error
|
|
||||||
func (hm *HostMap) DeleteReverseIndex(index uint32) {
|
func (hm *HostMap) DeleteReverseIndex(index uint32) {
|
||||||
hm.Lock()
|
hm.Lock()
|
||||||
hostinfo, ok := hm.RemoteIndexes[index]
|
hostinfo, ok := hm.RemoteIndexes[index]
|
||||||
@@ -396,32 +381,16 @@ func (hm *HostMap) DeleteReverseIndex(index uint32) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) {
|
// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
|
||||||
|
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
|
||||||
// Delete the host itself, ensuring it's not modified anymore
|
// Delete the host itself, ensuring it's not modified anymore
|
||||||
hm.Lock()
|
hm.Lock()
|
||||||
|
// If we have a previous or next hostinfo then we are not the last one for this vpn ip
|
||||||
|
final := (hostinfo.next == nil && hostinfo.prev == nil)
|
||||||
hm.unlockedDeleteHostInfo(hostinfo)
|
hm.unlockedDeleteHostInfo(hostinfo)
|
||||||
hm.Unlock()
|
hm.Unlock()
|
||||||
|
|
||||||
// And tear down all the relays going through this host
|
return final
|
||||||
for _, localIdx := range hostinfo.relayState.CopyRelayForIdxs() {
|
|
||||||
hm.RemoveRelay(localIdx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// And tear down the relays this deleted hostInfo was using to be reached
|
|
||||||
teardownRelayIdx := []uint32{}
|
|
||||||
for _, relayIp := range hostinfo.relayState.CopyRelayIps() {
|
|
||||||
relayHostInfo, err := hm.QueryVpnIp(relayIp)
|
|
||||||
if err != nil {
|
|
||||||
hm.l.WithError(err).WithField("relay", relayIp).Info("Missing relay host in hostmap")
|
|
||||||
} else {
|
|
||||||
if r, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp); ok {
|
|
||||||
teardownRelayIdx = append(teardownRelayIdx, r.LocalIndex)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, localIdx := range teardownRelayIdx {
|
|
||||||
hm.RemoveRelay(localIdx)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
|
func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
|
||||||
@@ -430,35 +399,91 @@ func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
|
|||||||
delete(hm.RemoteIndexes, localIdx)
|
delete(hm.RemoteIndexes, localIdx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
|
||||||
// Check if this same hostId is in the hostmap with a different instance.
|
hm.Lock()
|
||||||
// This could happen if we have an entry in the pending hostmap with different
|
defer hm.Unlock()
|
||||||
// index values than the one in the main hostmap.
|
hm.unlockedMakePrimary(hostinfo)
|
||||||
hostinfo2, ok := hm.Hosts[hostinfo.vpnIp]
|
}
|
||||||
if ok && hostinfo2 != hostinfo {
|
|
||||||
delete(hm.Hosts, hostinfo2.vpnIp)
|
func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
|
||||||
delete(hm.Indexes, hostinfo2.localIndexId)
|
oldHostinfo := hm.Hosts[hostinfo.vpnIp]
|
||||||
delete(hm.RemoteIndexes, hostinfo2.remoteIndexId)
|
if oldHostinfo == hostinfo {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(hm.Hosts, hostinfo.vpnIp)
|
if hostinfo.prev != nil {
|
||||||
if len(hm.Hosts) == 0 {
|
hostinfo.prev.next = hostinfo.next
|
||||||
hm.Hosts = map[iputil.VpnIp]*HostInfo{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if hostinfo.next != nil {
|
||||||
|
hostinfo.next.prev = hostinfo.prev
|
||||||
|
}
|
||||||
|
|
||||||
|
hm.Hosts[hostinfo.vpnIp] = hostinfo
|
||||||
|
|
||||||
|
if oldHostinfo == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hostinfo.next = oldHostinfo
|
||||||
|
oldHostinfo.prev = hostinfo
|
||||||
|
hostinfo.prev = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
||||||
|
primary, ok := hm.Hosts[hostinfo.vpnIp]
|
||||||
|
if ok && primary == hostinfo {
|
||||||
|
// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
|
||||||
|
delete(hm.Hosts, hostinfo.vpnIp)
|
||||||
|
if len(hm.Hosts) == 0 {
|
||||||
|
hm.Hosts = map[iputil.VpnIp]*HostInfo{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostinfo.next != nil {
|
||||||
|
// We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
|
||||||
|
hm.Hosts[hostinfo.vpnIp] = hostinfo.next
|
||||||
|
// It is primary, there is no previous hostinfo now
|
||||||
|
hostinfo.next.prev = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Relink if we were in the middle of multiple hostinfos for this vpn ip
|
||||||
|
if hostinfo.prev != nil {
|
||||||
|
hostinfo.prev.next = hostinfo.next
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostinfo.next != nil {
|
||||||
|
hostinfo.next.prev = hostinfo.prev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
hostinfo.next = nil
|
||||||
|
hostinfo.prev = nil
|
||||||
|
|
||||||
|
// The remote index uses index ids outside our control so lets make sure we are only removing
|
||||||
|
// the remote index pointer here if it points to the hostinfo we are deleting
|
||||||
|
hostinfo2, ok := hm.RemoteIndexes[hostinfo.remoteIndexId]
|
||||||
|
if ok && hostinfo2 == hostinfo {
|
||||||
|
delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
|
||||||
|
if len(hm.RemoteIndexes) == 0 {
|
||||||
|
hm.RemoteIndexes = map[uint32]*HostInfo{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
delete(hm.Indexes, hostinfo.localIndexId)
|
delete(hm.Indexes, hostinfo.localIndexId)
|
||||||
if len(hm.Indexes) == 0 {
|
if len(hm.Indexes) == 0 {
|
||||||
hm.Indexes = map[uint32]*HostInfo{}
|
hm.Indexes = map[uint32]*HostInfo{}
|
||||||
}
|
}
|
||||||
delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
|
|
||||||
if len(hm.RemoteIndexes) == 0 {
|
|
||||||
hm.RemoteIndexes = map[uint32]*HostInfo{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hm.l.Level >= logrus.DebugLevel {
|
if hm.l.Level >= logrus.DebugLevel {
|
||||||
hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
|
hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
|
||||||
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
|
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
|
||||||
Debug("Hostmap hostInfo deleted")
|
Debug("Hostmap hostInfo deleted")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
|
||||||
|
delete(hm.Relays, localRelayIdx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
|
func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
|
||||||
@@ -472,6 +497,20 @@ func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
|
|||||||
return nil, errors.New("unable to find index")
|
return nil, errors.New("unable to find index")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Retrieves a HostInfo by Index. Returns whether the HostInfo is primary at time of query.
|
||||||
|
// This helper exists so that the hostinfo.prev pointer can be read while the hostmap lock is held.
|
||||||
|
func (hm *HostMap) QueryIndexIsPrimary(index uint32) (*HostInfo, bool, error) {
|
||||||
|
//TODO: we probably just want to return bool instead of error, or at least a static error
|
||||||
|
hm.RLock()
|
||||||
|
if h, ok := hm.Indexes[index]; ok {
|
||||||
|
hm.RUnlock()
|
||||||
|
return h, h.prev == nil, nil
|
||||||
|
} else {
|
||||||
|
hm.RUnlock()
|
||||||
|
return nil, false, errors.New("unable to find index")
|
||||||
|
}
|
||||||
|
}
|
||||||
func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
|
func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
|
||||||
//TODO: we probably just want to return bool instead of error, or at least a static error
|
//TODO: we probably just want to return bool instead of error, or at least a static error
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
@@ -499,6 +538,24 @@ func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) {
|
|||||||
return hm.queryVpnIp(vpnIp, nil)
|
return hm.queryVpnIp(vpnIp, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*HostInfo, *Relay, error) {
|
||||||
|
hm.RLock()
|
||||||
|
defer hm.RUnlock()
|
||||||
|
|
||||||
|
h, ok := hm.Hosts[relayHostIp]
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, errors.New("unable to find host")
|
||||||
|
}
|
||||||
|
for h != nil {
|
||||||
|
r, ok := h.relayState.QueryRelayForByIp(targetIp)
|
||||||
|
if ok && r.State == Established {
|
||||||
|
return h, r, nil
|
||||||
|
}
|
||||||
|
h = h.next
|
||||||
|
}
|
||||||
|
return nil, nil, errors.New("unable to find host with relay")
|
||||||
|
}
|
||||||
|
|
||||||
// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
|
// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
|
||||||
// `PromoteEvery` calls to this function for a given host.
|
// `PromoteEvery` calls to this function for a given host.
|
||||||
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) {
|
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) {
|
||||||
@@ -521,15 +578,22 @@ func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*Host
|
|||||||
return nil, errors.New("unable to find host")
|
return nil, errors.New("unable to find host")
|
||||||
}
|
}
|
||||||
|
|
||||||
// We already have the hm Lock when this is called, so make sure to not call
|
// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
|
||||||
// any other methods that might try to grab it again
|
// If an entry exists for the Hosts table (vpnIp -> hostinfo) then the provided hostinfo will be made primary
|
||||||
func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) {
|
func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
|
||||||
if f.serveDns {
|
if f.serveDns {
|
||||||
remoteCert := hostinfo.ConnectionState.peerCert
|
remoteCert := hostinfo.ConnectionState.peerCert
|
||||||
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
|
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
existing := hm.Hosts[hostinfo.vpnIp]
|
||||||
hm.Hosts[hostinfo.vpnIp] = hostinfo
|
hm.Hosts[hostinfo.vpnIp] = hostinfo
|
||||||
|
|
||||||
|
if existing != nil {
|
||||||
|
hostinfo.next = existing
|
||||||
|
existing.prev = hostinfo
|
||||||
|
}
|
||||||
|
|
||||||
hm.Indexes[hostinfo.localIndexId] = hostinfo
|
hm.Indexes[hostinfo.localIndexId] = hostinfo
|
||||||
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
|
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
|
||||||
|
|
||||||
@@ -538,60 +602,22 @@ func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) {
|
|||||||
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
|
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
|
||||||
Debug("Hostmap vpnIp added")
|
Debug("Hostmap vpnIp added")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// punchList assembles a list of all non nil RemoteList pointer entries in this hostmap
|
i := 1
|
||||||
// The caller can then do the its work outside of the read lock
|
check := hostinfo
|
||||||
func (hm *HostMap) punchList(rl []*RemoteList) []*RemoteList {
|
for check != nil {
|
||||||
hm.RLock()
|
if i > MaxHostInfosPerVpnIp {
|
||||||
defer hm.RUnlock()
|
hm.unlockedDeleteHostInfo(check)
|
||||||
|
|
||||||
for _, v := range hm.Hosts {
|
|
||||||
if v.remotes != nil {
|
|
||||||
rl = append(rl, v.remotes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return rl
|
|
||||||
}
|
|
||||||
|
|
||||||
// Punchy iterates through the result of punchList() to assemble all known addresses and sends a hole punch packet to them
|
|
||||||
func (hm *HostMap) Punchy(ctx context.Context, conn *udp.Conn) {
|
|
||||||
var metricsTxPunchy metrics.Counter
|
|
||||||
if hm.metricsEnabled {
|
|
||||||
metricsTxPunchy = metrics.GetOrRegisterCounter("messages.tx.punchy", nil)
|
|
||||||
} else {
|
|
||||||
metricsTxPunchy = metrics.NilCounter{}
|
|
||||||
}
|
|
||||||
|
|
||||||
var remotes []*RemoteList
|
|
||||||
b := []byte{1}
|
|
||||||
|
|
||||||
clockSource := time.NewTicker(time.Second * 10)
|
|
||||||
defer clockSource.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
remotes = hm.punchList(remotes[:0])
|
|
||||||
for _, rl := range remotes {
|
|
||||||
//TODO: CopyAddrs generates garbage but ForEach locks for the work here, figure out which way is better
|
|
||||||
for _, addr := range rl.CopyAddrs(hm.preferredRanges) {
|
|
||||||
metricsTxPunchy.Inc(1)
|
|
||||||
conn.WriteTo(b, addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-clockSource.C:
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
check = check.next
|
||||||
|
i++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TryPromoteBest handles re-querying lighthouses and probing for better paths
|
// TryPromoteBest handles re-querying lighthouses and probing for better paths
|
||||||
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
|
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
|
||||||
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
|
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
|
||||||
c := atomic.AddUint32(&i.promoteCounter, 1)
|
c := i.promoteCounter.Add(1)
|
||||||
if c%PromoteEvery == 0 {
|
if c%PromoteEvery == 0 {
|
||||||
// The lock here is currently protecting i.remote access
|
// The lock here is currently protecting i.remote access
|
||||||
i.RLock()
|
i.RLock()
|
||||||
@@ -658,7 +684,7 @@ func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
|
|||||||
i.HandshakeComplete = true
|
i.HandshakeComplete = true
|
||||||
//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
|
//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
|
||||||
// Clamping it to 2 gets us out of the woods for now
|
// Clamping it to 2 gets us out of the woods for now
|
||||||
atomic.StoreUint64(&i.ConnectionState.atomicMessageCounter, 2)
|
i.ConnectionState.messageCounter.Store(2)
|
||||||
|
|
||||||
if l.Level >= logrus.DebugLevel {
|
if l.Level >= logrus.DebugLevel {
|
||||||
i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
|
i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
|
||||||
@@ -677,7 +703,6 @@ func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
|
|||||||
i.packetStore = make([]*cachedPacket, 0)
|
i.packetStore = make([]*cachedPacket, 0)
|
||||||
i.ConnectionState.ready = true
|
i.ConnectionState.ready = true
|
||||||
i.ConnectionState.queueLock.Unlock()
|
i.ConnectionState.queueLock.Unlock()
|
||||||
i.ConnectionState.certState = nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *HostInfo) GetCert() *cert.NebulaCertificate {
|
func (i *HostInfo) GetCert() *cert.NebulaCertificate {
|
||||||
@@ -765,7 +790,10 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
|||||||
return logrus.NewEntry(l)
|
return logrus.NewEntry(l)
|
||||||
}
|
}
|
||||||
|
|
||||||
li := l.WithField("vpnIp", i.vpnIp)
|
li := l.WithField("vpnIp", i.vpnIp).
|
||||||
|
WithField("localIndex", i.localIndexId).
|
||||||
|
WithField("remoteIndex", i.remoteIndexId)
|
||||||
|
|
||||||
if connState := i.ConnectionState; connState != nil {
|
if connState := i.ConnectionState; connState != nil {
|
||||||
if peerCert := connState.peerCert; peerCert != nil {
|
if peerCert := connState.peerCert; peerCert != nil {
|
||||||
li = li.WithField("certName", peerCert.Details.Name)
|
li = li.WithField("certName", peerCert.Details.Name)
|
||||||
|
|||||||
206
hostmap_test.go
206
hostmap_test.go
@@ -1 +1,207 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/test"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHostMap_MakePrimary(t *testing.T) {
|
||||||
|
l := test.NewLogger()
|
||||||
|
hm := NewHostMap(
|
||||||
|
l, "test",
|
||||||
|
&net.IPNet{
|
||||||
|
IP: net.IP{10, 0, 0, 1},
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
},
|
||||||
|
[]*net.IPNet{},
|
||||||
|
)
|
||||||
|
|
||||||
|
f := &Interface{}
|
||||||
|
|
||||||
|
h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
|
||||||
|
h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
|
||||||
|
h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
|
||||||
|
h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
|
||||||
|
|
||||||
|
hm.unlockedAddHostInfo(h4, f)
|
||||||
|
hm.unlockedAddHostInfo(h3, f)
|
||||||
|
hm.unlockedAddHostInfo(h2, f)
|
||||||
|
hm.unlockedAddHostInfo(h1, f)
|
||||||
|
|
||||||
|
// Make sure we go h1 -> h2 -> h3 -> h4
|
||||||
|
prim, _ := hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h1.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Swap h3/middle to primary
|
||||||
|
hm.MakePrimary(h3)
|
||||||
|
|
||||||
|
// Make sure we go h3 -> h1 -> h2 -> h4
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Swap h4/tail to primary
|
||||||
|
hm.MakePrimary(h4)
|
||||||
|
|
||||||
|
// Make sure we go h4 -> h3 -> h1 -> h2
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Nil(t, h2.next)
|
||||||
|
|
||||||
|
// Swap h4 again should be no-op
|
||||||
|
hm.MakePrimary(h4)
|
||||||
|
|
||||||
|
// Make sure we go h4 -> h3 -> h1 -> h2
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Nil(t, h2.next)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHostMap_DeleteHostInfo(t *testing.T) {
|
||||||
|
l := test.NewLogger()
|
||||||
|
hm := NewHostMap(
|
||||||
|
l, "test",
|
||||||
|
&net.IPNet{
|
||||||
|
IP: net.IP{10, 0, 0, 1},
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
},
|
||||||
|
[]*net.IPNet{},
|
||||||
|
)
|
||||||
|
|
||||||
|
f := &Interface{}
|
||||||
|
|
||||||
|
h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
|
||||||
|
h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
|
||||||
|
h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
|
||||||
|
h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
|
||||||
|
h5 := &HostInfo{vpnIp: 1, localIndexId: 5}
|
||||||
|
h6 := &HostInfo{vpnIp: 1, localIndexId: 6}
|
||||||
|
|
||||||
|
hm.unlockedAddHostInfo(h6, f)
|
||||||
|
hm.unlockedAddHostInfo(h5, f)
|
||||||
|
hm.unlockedAddHostInfo(h4, f)
|
||||||
|
hm.unlockedAddHostInfo(h3, f)
|
||||||
|
hm.unlockedAddHostInfo(h2, f)
|
||||||
|
hm.unlockedAddHostInfo(h1, f)
|
||||||
|
|
||||||
|
// h6 should be deleted
|
||||||
|
assert.Nil(t, h6.next)
|
||||||
|
assert.Nil(t, h6.prev)
|
||||||
|
_, err := hm.QueryIndex(h6.localIndexId)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
|
||||||
|
prim, _ := hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h1.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Delete primary
|
||||||
|
hm.DeleteHostInfo(h1)
|
||||||
|
assert.Nil(t, h1.prev)
|
||||||
|
assert.Nil(t, h1.next)
|
||||||
|
|
||||||
|
// Make sure we go h2 -> h3 -> h4 -> h5
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
|
||||||
|
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Delete in the middle
|
||||||
|
hm.DeleteHostInfo(h3)
|
||||||
|
assert.Nil(t, h3.prev)
|
||||||
|
assert.Nil(t, h3.next)
|
||||||
|
|
||||||
|
// Make sure we go h2 -> h4 -> h5
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Delete the tail
|
||||||
|
hm.DeleteHostInfo(h5)
|
||||||
|
assert.Nil(t, h5.prev)
|
||||||
|
assert.Nil(t, h5.next)
|
||||||
|
|
||||||
|
// Make sure we go h2 -> h4
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h2.localIndexId, prim.localIndexId)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
|
||||||
|
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Delete the head
|
||||||
|
hm.DeleteHostInfo(h2)
|
||||||
|
assert.Nil(t, h2.prev)
|
||||||
|
assert.Nil(t, h2.next)
|
||||||
|
|
||||||
|
// Make sure we only have h4
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Equal(t, h4.localIndexId, prim.localIndexId)
|
||||||
|
assert.Nil(t, prim.prev)
|
||||||
|
assert.Nil(t, prim.next)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Delete the only item
|
||||||
|
hm.DeleteHostInfo(h4)
|
||||||
|
assert.Nil(t, h4.prev)
|
||||||
|
assert.Nil(t, h4.next)
|
||||||
|
|
||||||
|
// Make sure we have nil
|
||||||
|
prim, _ = hm.QueryVpnIp(1)
|
||||||
|
assert.Nil(t, prim)
|
||||||
|
}
|
||||||
|
|||||||
24
hostmap_tester.go
Normal file
24
hostmap_tester.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
// This file contains functions used to export information to the e2e testing framework
|
||||||
|
|
||||||
|
import "github.com/slackhq/nebula/iputil"
|
||||||
|
|
||||||
|
func (i *HostInfo) GetVpnIp() iputil.VpnIp {
|
||||||
|
return i.vpnIp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) GetLocalIndex() uint32 {
|
||||||
|
return i.localIndexId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) GetRemoteIndex() uint32 {
|
||||||
|
return i.remoteIndexId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) GetRelayState() *RelayState {
|
||||||
|
return &i.relayState
|
||||||
|
}
|
||||||
135
inside.go
135
inside.go
@@ -1,20 +1,21 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/firewall"
|
"github.com/slackhq/nebula/firewall"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/noiseutil"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
|
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
err := newPacket(packet, false, fwPacket)
|
err := newPacket(packet, false, fwPacket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -25,8 +26,9 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
|
|
||||||
if fwPacket.RemoteIP == f.myVpnIp {
|
if fwPacket.RemoteIP == f.myVpnIp {
|
||||||
// Immediately forward packets from self to self.
|
// Immediately forward packets from self to self.
|
||||||
// This should only happen on Darwin-based hosts, which routes packets from
|
// This should only happen on Darwin-based and FreeBSD hosts, which
|
||||||
// the Nebula IP to the Nebula IP through the Nebula TUN device.
|
// routes packets from the Nebula IP to the Nebula IP through the Nebula
|
||||||
|
// TUN device.
|
||||||
if immediatelyForwardToSelf {
|
if immediatelyForwardToSelf {
|
||||||
_, err := f.readers[q].Write(packet)
|
_, err := f.readers[q].Write(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -45,6 +47,7 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
|
|
||||||
hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
|
hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
|
||||||
if hostinfo == nil {
|
if hostinfo == nil {
|
||||||
|
f.rejectInside(packet, out, q)
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("vpnIp", fwPacket.RemoteIP).
|
f.l.WithField("vpnIp", fwPacket.RemoteIP).
|
||||||
WithField("fwPacket", fwPacket).
|
WithField("fwPacket", fwPacket).
|
||||||
@@ -54,7 +57,7 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
}
|
}
|
||||||
ci := hostinfo.ConnectionState
|
ci := hostinfo.ConnectionState
|
||||||
|
|
||||||
if ci.ready == false {
|
if !ci.ready {
|
||||||
// Because we might be sending stored packets, lock here to stop new things going to
|
// Because we might be sending stored packets, lock here to stop new things going to
|
||||||
// the packet queue.
|
// the packet queue.
|
||||||
ci.queueLock.Lock()
|
ci.queueLock.Lock()
|
||||||
@@ -70,22 +73,49 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
|||||||
if dropReason == nil {
|
if dropReason == nil {
|
||||||
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q)
|
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q)
|
||||||
|
|
||||||
} else if f.l.Level >= logrus.DebugLevel {
|
} else {
|
||||||
hostinfo.logger(f.l).
|
f.rejectInside(packet, out, q)
|
||||||
WithField("fwPacket", fwPacket).
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
WithField("reason", dropReason).
|
hostinfo.logger(f.l).
|
||||||
Debugln("dropping outbound packet")
|
WithField("fwPacket", fwPacket).
|
||||||
|
WithField("reason", dropReason).
|
||||||
|
Debugln("dropping outbound packet")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Interface) rejectInside(packet []byte, out []byte, q int) {
|
||||||
|
if !f.firewall.InSendReject {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = iputil.CreateRejectPacket(packet, out)
|
||||||
|
_, err := f.readers[q].Write(out)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).Error("Failed to write to tun")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *HostInfo, nb, out []byte, q int) {
|
||||||
|
if !f.firewall.OutSendReject {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use some out buffer space to build the packet before encryption
|
||||||
|
// Need 40 bytes for the reject packet (20 byte ipv4 header, 20 byte tcp rst packet)
|
||||||
|
// Leave 100 bytes for the encrypted packet (60 byte Nebula header, 40 byte reject packet)
|
||||||
|
out = out[:140]
|
||||||
|
outPacket := iputil.CreateRejectPacket(packet, out[100:])
|
||||||
|
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, outPacket, nb, out, q)
|
||||||
|
}
|
||||||
|
|
||||||
func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
|
func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
|
||||||
f.getOrHandshake(vpnIp)
|
f.getOrHandshake(vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getOrHandshake returns nil if the vpnIp is not routable
|
// getOrHandshake returns nil if the vpnIp is not routable
|
||||||
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
|
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
|
||||||
//TODO: we can find contains without converting back to bytes
|
if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) {
|
||||||
if f.hostMap.vpnCIDR.Contains(vpnIp.ToIP()) == false {
|
|
||||||
vpnIp = f.inside.RouteFor(vpnIp)
|
vpnIp = f.inside.RouteFor(vpnIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return nil
|
return nil
|
||||||
@@ -124,7 +154,13 @@ func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
|
|||||||
|
|
||||||
// If this is a static host, we don't need to wait for the HostQueryReply
|
// If this is a static host, we don't need to wait for the HostQueryReply
|
||||||
// We can trigger the handshake right now
|
// We can trigger the handshake right now
|
||||||
if _, ok := f.lightHouse.GetStaticHostList()[vpnIp]; ok {
|
_, doTrigger := f.lightHouse.GetStaticHostList()[vpnIp]
|
||||||
|
if !doTrigger {
|
||||||
|
// Add any calculated remotes, and trigger early handshake if one found
|
||||||
|
doTrigger = f.lightHouse.addCalculatedRemotes(vpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
|
if doTrigger {
|
||||||
select {
|
select {
|
||||||
case f.handshakeManager.trigger <- vpnIp:
|
case f.handshakeManager.trigger <- vpnIp:
|
||||||
default:
|
default:
|
||||||
@@ -141,7 +177,7 @@ func (f *Interface) initHostInfo(hostinfo *HostInfo) {
|
|||||||
hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0)
|
hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
|
||||||
fp := &firewall.Packet{}
|
fp := &firewall.Packet{}
|
||||||
err := newPacket(p, false, fp)
|
err := newPacket(p, false, fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -150,7 +186,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
|
|||||||
}
|
}
|
||||||
|
|
||||||
// check if packet is in outbound fw rules
|
// check if packet is in outbound fw rules
|
||||||
dropReason := f.firewall.Drop(p, *fp, false, hostInfo, f.caPool, nil)
|
dropReason := f.firewall.Drop(p, *fp, false, hostinfo, f.caPool, nil)
|
||||||
if dropReason != nil {
|
if dropReason != nil {
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("fwPacket", fp).
|
f.l.WithField("fwPacket", fp).
|
||||||
@@ -160,7 +196,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.sendNoMetrics(header.Message, st, hostInfo.ConnectionState, hostInfo, nil, p, nb, out, 0)
|
f.sendNoMetrics(header.Message, st, hostinfo.ConnectionState, hostinfo, nil, p, nb, out, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
|
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
|
||||||
@@ -179,19 +215,18 @@ func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSu
|
|||||||
// the packet queue.
|
// the packet queue.
|
||||||
hostInfo.ConnectionState.queueLock.Lock()
|
hostInfo.ConnectionState.queueLock.Lock()
|
||||||
if !hostInfo.ConnectionState.ready {
|
if !hostInfo.ConnectionState.ready {
|
||||||
hostInfo.cachePacket(f.l, t, st, p, f.sendMessageToVpnIp, f.cachedPacketMetrics)
|
hostInfo.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
hostInfo.ConnectionState.queueLock.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
hostInfo.ConnectionState.queueLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
f.sendMessageToVpnIp(t, st, hostInfo, p, nb, out)
|
f.SendMessageToHostInfo(t, st, hostInfo, p, nb, out)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendMessageToVpnIp(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hi *HostInfo, p, nb, out []byte) {
|
||||||
f.send(t, st, hostInfo.ConnectionState, hostInfo, p, nb, out)
|
f.send(t, st, hi.ConnectionState, hi, p, nb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) {
|
||||||
@@ -212,23 +247,28 @@ func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *C
|
|||||||
// nb is a buffer used to store the nonce value, re-used for performance reasons.
|
// nb is a buffer used to store the nonce value, re-used for performance reasons.
|
||||||
// out is a buffer used to store the result of the Encrypt operation
|
// out is a buffer used to store the result of the Encrypt operation
|
||||||
// q indicates which writer to use to send the packet.
|
// q indicates which writer to use to send the packet.
|
||||||
func (f *Interface) SendVia(viaIfc interface{},
|
func (f *Interface) SendVia(via *HostInfo,
|
||||||
relayIfc interface{},
|
relay *Relay,
|
||||||
ad,
|
ad,
|
||||||
nb,
|
nb,
|
||||||
out []byte,
|
out []byte,
|
||||||
nocopy bool,
|
nocopy bool,
|
||||||
) {
|
) {
|
||||||
via := viaIfc.(*HostInfo)
|
if noiseutil.EncryptLockNeeded {
|
||||||
relay := relayIfc.(*Relay)
|
// NOTE: for goboring AESGCMTLS we need to lock because of the nonce check
|
||||||
c := atomic.AddUint64(&via.ConnectionState.atomicMessageCounter, 1)
|
via.ConnectionState.writeLock.Lock()
|
||||||
|
}
|
||||||
|
c := via.ConnectionState.messageCounter.Add(1)
|
||||||
|
|
||||||
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
|
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
|
||||||
f.connectionManager.Out(via.vpnIp)
|
f.connectionManager.Out(via.localIndexId)
|
||||||
|
|
||||||
// Authenticate the header and payload, but do not encrypt for this message type.
|
// Authenticate the header and payload, but do not encrypt for this message type.
|
||||||
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
|
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
|
||||||
if len(out)+len(ad)+via.ConnectionState.eKey.Overhead() > cap(out) {
|
if len(out)+len(ad)+via.ConnectionState.eKey.Overhead() > cap(out) {
|
||||||
|
if noiseutil.EncryptLockNeeded {
|
||||||
|
via.ConnectionState.writeLock.Unlock()
|
||||||
|
}
|
||||||
via.logger(f.l).
|
via.logger(f.l).
|
||||||
WithField("outCap", cap(out)).
|
WithField("outCap", cap(out)).
|
||||||
WithField("payloadLen", len(ad)).
|
WithField("payloadLen", len(ad)).
|
||||||
@@ -250,6 +290,9 @@ func (f *Interface) SendVia(viaIfc interface{},
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
out, err = via.ConnectionState.eKey.EncryptDanger(out, out, nil, c, nb)
|
out, err = via.ConnectionState.eKey.EncryptDanger(out, out, nil, c, nb)
|
||||||
|
if noiseutil.EncryptLockNeeded {
|
||||||
|
via.ConnectionState.writeLock.Unlock()
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
via.logger(f.l).WithError(err).Info("Failed to EncryptDanger in sendVia")
|
via.logger(f.l).WithError(err).Info("Failed to EncryptDanger in sendVia")
|
||||||
return
|
return
|
||||||
@@ -258,6 +301,7 @@ func (f *Interface) SendVia(viaIfc interface{},
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
via.logger(f.l).WithError(err).Info("Failed to WriteTo in sendVia")
|
via.logger(f.l).WithError(err).Info("Failed to WriteTo in sendVia")
|
||||||
}
|
}
|
||||||
|
f.connectionManager.RelayUsed(relay.LocalIndex)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
|
func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
|
||||||
@@ -278,13 +322,15 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
|||||||
out = out[header.Len:]
|
out = out[header.Len:]
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: enable if we do more than 1 tun queue
|
if noiseutil.EncryptLockNeeded {
|
||||||
//ci.writeLock.Lock()
|
// NOTE: for goboring AESGCMTLS we need to lock because of the nonce check
|
||||||
c := atomic.AddUint64(&ci.atomicMessageCounter, 1)
|
ci.writeLock.Lock()
|
||||||
|
}
|
||||||
|
c := ci.messageCounter.Add(1)
|
||||||
|
|
||||||
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
||||||
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
|
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
|
||||||
f.connectionManager.Out(hostinfo.vpnIp)
|
f.connectionManager.Out(hostinfo.localIndexId)
|
||||||
|
|
||||||
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
||||||
// all our IPs and enable a faster roaming.
|
// all our IPs and enable a faster roaming.
|
||||||
@@ -300,8 +346,9 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
|
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
|
||||||
//TODO: see above note on lock
|
if noiseutil.EncryptLockNeeded {
|
||||||
//ci.writeLock.Unlock()
|
ci.writeLock.Unlock()
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithError(err).
|
hostinfo.logger(f.l).WithError(err).
|
||||||
WithField("udpAddr", remote).WithField("counter", c).
|
WithField("udpAddr", remote).WithField("counter", c).
|
||||||
@@ -325,31 +372,19 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
|||||||
} else {
|
} else {
|
||||||
// Try to send via a relay
|
// Try to send via a relay
|
||||||
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
|
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
|
||||||
relayHostInfo, err := f.hostMap.QueryVpnIp(relayIP)
|
relayHostInfo, relay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relayIP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithField("relayIp", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
|
hostinfo.relayState.DeleteRelay(relayIP)
|
||||||
continue
|
hostinfo.logger(f.l).WithField("relay", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
|
||||||
}
|
|
||||||
relay, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp)
|
|
||||||
if !ok {
|
|
||||||
hostinfo.logger(f.l).
|
|
||||||
WithField("relayIp", relayHostInfo.vpnIp).
|
|
||||||
WithField("relayTarget", hostinfo.vpnIp).
|
|
||||||
Info("sendNoMetrics relay missing object for target")
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
|
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func isMulticast(ip iputil.VpnIp) bool {
|
func isMulticast(ip iputil.VpnIp) bool {
|
||||||
// Class D multicast
|
// Class D multicast
|
||||||
if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 {
|
return (((ip >> 24) & 0xff) & 0xf0) == 0xe0
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|||||||
6
inside_bsd.go
Normal file
6
inside_bsd.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
|
||||||
|
// +build darwin dragonfly freebsd netbsd openbsd
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
const immediatelyForwardToSelf bool = true
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
package nebula
|
|
||||||
|
|
||||||
const immediatelyForwardToSelf bool = true
|
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
//go:build !darwin
|
//go:build !darwin && !dragonfly && !freebsd && !netbsd && !openbsd
|
||||||
// +build !darwin
|
// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd
|
||||||
|
|
||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
|
|||||||
44
interface.go
44
interface.go
@@ -16,6 +16,7 @@ import (
|
|||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/firewall"
|
"github.com/slackhq/nebula/firewall"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/overlay"
|
"github.com/slackhq/nebula/overlay"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
@@ -33,8 +34,8 @@ type InterfaceConfig struct {
|
|||||||
ServeDns bool
|
ServeDns bool
|
||||||
HandshakeManager *HandshakeManager
|
HandshakeManager *HandshakeManager
|
||||||
lightHouse *LightHouse
|
lightHouse *LightHouse
|
||||||
checkInterval int
|
checkInterval time.Duration
|
||||||
pendingDeletionInterval int
|
pendingDeletionInterval time.Duration
|
||||||
DropLocalBroadcast bool
|
DropLocalBroadcast bool
|
||||||
DropMulticast bool
|
DropMulticast bool
|
||||||
routines int
|
routines int
|
||||||
@@ -43,6 +44,7 @@ type InterfaceConfig struct {
|
|||||||
caPool *cert.NebulaCAPool
|
caPool *cert.NebulaCAPool
|
||||||
disconnectInvalid bool
|
disconnectInvalid bool
|
||||||
relayManager *relayManager
|
relayManager *relayManager
|
||||||
|
punchy *Punchy
|
||||||
|
|
||||||
ConntrackCacheTimeout time.Duration
|
ConntrackCacheTimeout time.Duration
|
||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
@@ -52,7 +54,7 @@ type Interface struct {
|
|||||||
hostMap *HostMap
|
hostMap *HostMap
|
||||||
outside *udp.Conn
|
outside *udp.Conn
|
||||||
inside overlay.Device
|
inside overlay.Device
|
||||||
certState *CertState
|
certState atomic.Pointer[CertState]
|
||||||
cipher string
|
cipher string
|
||||||
firewall *Firewall
|
firewall *Firewall
|
||||||
connectionManager *connectionManager
|
connectionManager *connectionManager
|
||||||
@@ -67,7 +69,7 @@ type Interface struct {
|
|||||||
routines int
|
routines int
|
||||||
caPool *cert.NebulaCAPool
|
caPool *cert.NebulaCAPool
|
||||||
disconnectInvalid bool
|
disconnectInvalid bool
|
||||||
closed int32
|
closed atomic.Bool
|
||||||
relayManager *relayManager
|
relayManager *relayManager
|
||||||
|
|
||||||
sendRecvErrorConfig sendRecvErrorConfig
|
sendRecvErrorConfig sendRecvErrorConfig
|
||||||
@@ -88,6 +90,19 @@ type Interface struct {
|
|||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EncWriter interface {
|
||||||
|
SendVia(via *HostInfo,
|
||||||
|
relay *Relay,
|
||||||
|
ad,
|
||||||
|
nb,
|
||||||
|
out []byte,
|
||||||
|
nocopy bool,
|
||||||
|
)
|
||||||
|
SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte)
|
||||||
|
SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte)
|
||||||
|
Handshake(vpnIp iputil.VpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
type sendRecvErrorConfig uint8
|
type sendRecvErrorConfig uint8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -141,7 +156,6 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
|||||||
hostMap: c.HostMap,
|
hostMap: c.HostMap,
|
||||||
outside: c.Outside,
|
outside: c.Outside,
|
||||||
inside: c.Inside,
|
inside: c.Inside,
|
||||||
certState: c.certState,
|
|
||||||
cipher: c.Cipher,
|
cipher: c.Cipher,
|
||||||
firewall: c.Firewall,
|
firewall: c.Firewall,
|
||||||
serveDns: c.ServeDns,
|
serveDns: c.ServeDns,
|
||||||
@@ -172,7 +186,8 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
|||||||
l: c.l,
|
l: c.l,
|
||||||
}
|
}
|
||||||
|
|
||||||
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval)
|
ifce.certState.Store(c.certState)
|
||||||
|
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
|
||||||
|
|
||||||
return ifce, nil
|
return ifce, nil
|
||||||
}
|
}
|
||||||
@@ -190,6 +205,7 @@ func (f *Interface) activate() {
|
|||||||
|
|
||||||
f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()).
|
f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()).
|
||||||
WithField("build", f.version).WithField("udpAddr", addr).
|
WithField("build", f.version).WithField("udpAddr", addr).
|
||||||
|
WithField("boringcrypto", boringEnabled()).
|
||||||
Info("Nebula interface is active")
|
Info("Nebula interface is active")
|
||||||
|
|
||||||
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
|
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
|
||||||
@@ -237,7 +253,7 @@ func (f *Interface) listenOut(i int) {
|
|||||||
|
|
||||||
lhh := f.lightHouse.NewRequestHandler()
|
lhh := f.lightHouse.NewRequestHandler()
|
||||||
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||||
li.ListenOut(f.readOutsidePackets, lhh.HandleRequest, conntrackCache, i)
|
li.ListenOut(readOutsidePackets(f), lhHandleRequest(lhh, f), conntrackCache, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
||||||
@@ -253,7 +269,7 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
|||||||
for {
|
for {
|
||||||
n, err := reader.Read(packet)
|
n, err := reader.Read(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&f.closed) != 0 {
|
if errors.Is(err, os.ErrClosed) && f.closed.Load() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -298,14 +314,15 @@ func (f *Interface) reloadCertKey(c *config.C) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// did IP in cert change? if so, don't set
|
// did IP in cert change? if so, don't set
|
||||||
oldIPs := f.certState.certificate.Details.Ips
|
currentCert := f.certState.Load().certificate
|
||||||
|
oldIPs := currentCert.Details.Ips
|
||||||
newIPs := cs.certificate.Details.Ips
|
newIPs := cs.certificate.Details.Ips
|
||||||
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
|
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
|
||||||
f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
|
f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.certState = cs
|
f.certState.Store(cs)
|
||||||
f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
|
f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -316,7 +333,7 @@ func (f *Interface) reloadFirewall(c *config.C) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
fw, err := NewFirewallFromConfig(f.l, f.certState.certificate, c)
|
fw, err := NewFirewallFromConfig(f.l, f.certState.Load().certificate, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithError(err).Error("Error while creating firewall during reload")
|
f.l.WithError(err).Error("Error while creating firewall during reload")
|
||||||
return
|
return
|
||||||
@@ -378,6 +395,8 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
|||||||
|
|
||||||
udpStats := udp.NewUDPStatsEmitter(f.writers)
|
udpStats := udp.NewUDPStatsEmitter(f.writers)
|
||||||
|
|
||||||
|
certExpirationGauge := metrics.GetOrRegisterGauge("certificate.ttl_seconds", nil)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@@ -386,12 +405,13 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
|||||||
f.firewall.EmitStats()
|
f.firewall.EmitStats()
|
||||||
f.handshakeManager.EmitStats()
|
f.handshakeManager.EmitStats()
|
||||||
udpStats()
|
udpStats()
|
||||||
|
certExpirationGauge.Update(int64(f.certState.Load().certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) Close() error {
|
func (f *Interface) Close() error {
|
||||||
atomic.StoreInt32(&f.closed, 1)
|
f.closed.Store(true)
|
||||||
|
|
||||||
// Release the tun device
|
// Release the tun device
|
||||||
return f.inside.Close()
|
return f.inside.Close()
|
||||||
|
|||||||
211
iputil/packet.go
Normal file
211
iputil/packet.go
Normal file
@@ -0,0 +1,211 @@
|
|||||||
|
package iputil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
|
||||||
|
"golang.org/x/net/ipv4"
|
||||||
|
)
|
||||||
|
|
||||||
|
func CreateRejectPacket(packet []byte, out []byte) []byte {
|
||||||
|
// TODO ipv4 only, need to fix when inside supports ipv6
|
||||||
|
switch packet[9] {
|
||||||
|
case 6: // tcp
|
||||||
|
return ipv4CreateRejectTCPPacket(packet, out)
|
||||||
|
default:
|
||||||
|
return ipv4CreateRejectICMPPacket(packet, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ipv4CreateRejectICMPPacket(packet []byte, out []byte) []byte {
|
||||||
|
ihl := int(packet[0]&0x0f) << 2
|
||||||
|
|
||||||
|
// ICMP reply includes header and first 8 bytes of the packet
|
||||||
|
packetLen := len(packet)
|
||||||
|
if packetLen > ihl+8 {
|
||||||
|
packetLen = ihl + 8
|
||||||
|
}
|
||||||
|
|
||||||
|
outLen := ipv4.HeaderLen + 8 + packetLen
|
||||||
|
|
||||||
|
out = out[:(outLen)]
|
||||||
|
|
||||||
|
ipHdr := out[0:ipv4.HeaderLen]
|
||||||
|
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
|
||||||
|
ipHdr[1] = 0 // DSCP, ECN
|
||||||
|
binary.BigEndian.PutUint16(ipHdr[2:], uint16(ipv4.HeaderLen+8+packetLen)) // Total Length
|
||||||
|
|
||||||
|
ipHdr[4] = 0 // id
|
||||||
|
ipHdr[5] = 0 // .
|
||||||
|
ipHdr[6] = 0 // flags, fragment offset
|
||||||
|
ipHdr[7] = 0 // .
|
||||||
|
ipHdr[8] = 64 // TTL
|
||||||
|
ipHdr[9] = 1 // protocol (icmp)
|
||||||
|
ipHdr[10] = 0 // checksum
|
||||||
|
ipHdr[11] = 0 // .
|
||||||
|
|
||||||
|
// Swap dest / src IPs
|
||||||
|
copy(ipHdr[12:16], packet[16:20])
|
||||||
|
copy(ipHdr[16:20], packet[12:16])
|
||||||
|
|
||||||
|
// Calculate checksum
|
||||||
|
binary.BigEndian.PutUint16(ipHdr[10:], tcpipChecksum(ipHdr, 0))
|
||||||
|
|
||||||
|
// ICMP Destination Unreachable
|
||||||
|
icmpOut := out[ipv4.HeaderLen:]
|
||||||
|
icmpOut[0] = 3 // type (Destination unreachable)
|
||||||
|
icmpOut[1] = 3 // code (Port unreachable error)
|
||||||
|
icmpOut[2] = 0 // checksum
|
||||||
|
icmpOut[3] = 0 // .
|
||||||
|
icmpOut[4] = 0 // unused
|
||||||
|
icmpOut[5] = 0 // .
|
||||||
|
icmpOut[6] = 0 // .
|
||||||
|
icmpOut[7] = 0 // .
|
||||||
|
|
||||||
|
// Copy original IP header and first 8 bytes as body
|
||||||
|
copy(icmpOut[8:], packet[:packetLen])
|
||||||
|
|
||||||
|
// Calculate checksum
|
||||||
|
binary.BigEndian.PutUint16(icmpOut[2:], tcpipChecksum(icmpOut, 0))
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func ipv4CreateRejectTCPPacket(packet []byte, out []byte) []byte {
|
||||||
|
const tcpLen = 20
|
||||||
|
|
||||||
|
ihl := int(packet[0]&0x0f) << 2
|
||||||
|
outLen := ipv4.HeaderLen + tcpLen
|
||||||
|
|
||||||
|
out = out[:(outLen)]
|
||||||
|
|
||||||
|
ipHdr := out[0:ipv4.HeaderLen]
|
||||||
|
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
|
||||||
|
ipHdr[1] = 0 // DSCP, ECN
|
||||||
|
binary.BigEndian.PutUint16(ipHdr[2:], uint16(outLen)) // Total Length
|
||||||
|
ipHdr[4] = 0 // id
|
||||||
|
ipHdr[5] = 0 // .
|
||||||
|
ipHdr[6] = 0 // flags, fragment offset
|
||||||
|
ipHdr[7] = 0 // .
|
||||||
|
ipHdr[8] = 64 // TTL
|
||||||
|
ipHdr[9] = 6 // protocol (tcp)
|
||||||
|
ipHdr[10] = 0 // checksum
|
||||||
|
ipHdr[11] = 0 // .
|
||||||
|
|
||||||
|
// Swap dest / src IPs
|
||||||
|
copy(ipHdr[12:16], packet[16:20])
|
||||||
|
copy(ipHdr[16:20], packet[12:16])
|
||||||
|
|
||||||
|
// Calculate checksum
|
||||||
|
binary.BigEndian.PutUint16(ipHdr[10:], tcpipChecksum(ipHdr, 0))
|
||||||
|
|
||||||
|
// TCP RST
|
||||||
|
tcpIn := packet[ihl:]
|
||||||
|
var ackSeq, seq uint32
|
||||||
|
outFlags := byte(0b00000100) // RST
|
||||||
|
|
||||||
|
// Set seq and ackSeq based on how iptables/netfilter does it in Linux:
|
||||||
|
// - https://github.com/torvalds/linux/blob/v5.19/net/ipv4/netfilter/nf_reject_ipv4.c#L193-L221
|
||||||
|
inAck := tcpIn[13]&0b00010000 != 0
|
||||||
|
if inAck {
|
||||||
|
seq = binary.BigEndian.Uint32(tcpIn[8:])
|
||||||
|
} else {
|
||||||
|
inSyn := uint32((tcpIn[13] & 0b00000010) >> 1)
|
||||||
|
inFin := uint32(tcpIn[13] & 0b00000001)
|
||||||
|
// seq from the packet + syn + fin + tcp segment length
|
||||||
|
ackSeq = binary.BigEndian.Uint32(tcpIn[4:]) + inSyn + inFin + uint32(len(tcpIn)) - uint32(tcpIn[12]>>4)<<2
|
||||||
|
outFlags |= 0b00010000 // ACK
|
||||||
|
}
|
||||||
|
|
||||||
|
tcpOut := out[ipv4.HeaderLen:]
|
||||||
|
// Swap dest / src ports
|
||||||
|
copy(tcpOut[0:2], tcpIn[2:4])
|
||||||
|
copy(tcpOut[2:4], tcpIn[0:2])
|
||||||
|
binary.BigEndian.PutUint32(tcpOut[4:], seq)
|
||||||
|
binary.BigEndian.PutUint32(tcpOut[8:], ackSeq)
|
||||||
|
tcpOut[12] = (tcpLen >> 2) << 4 // data offset, reserved, NS
|
||||||
|
tcpOut[13] = outFlags // CWR, ECE, URG, ACK, PSH, RST, SYN, FIN
|
||||||
|
tcpOut[14] = 0 // window size
|
||||||
|
tcpOut[15] = 0 // .
|
||||||
|
tcpOut[16] = 0 // checksum
|
||||||
|
tcpOut[17] = 0 // .
|
||||||
|
tcpOut[18] = 0 // URG Pointer
|
||||||
|
tcpOut[19] = 0 // .
|
||||||
|
|
||||||
|
// Calculate checksum
|
||||||
|
csum := ipv4PseudoheaderChecksum(ipHdr[12:16], ipHdr[16:20], 6, tcpLen)
|
||||||
|
binary.BigEndian.PutUint16(tcpOut[16:], tcpipChecksum(tcpOut, csum))
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateICMPEchoResponse(packet, out []byte) []byte {
|
||||||
|
// Return early if this is not a simple ICMP Echo Request
|
||||||
|
//TODO: make constants out of these
|
||||||
|
if !(len(packet) >= 28 && len(packet) <= 9001 && packet[0] == 0x45 && packet[9] == 0x01 && packet[20] == 0x08) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't support fragmented packets
|
||||||
|
if packet[7] != 0 || (packet[6]&0x2F != 0) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out = out[:len(packet)]
|
||||||
|
|
||||||
|
copy(out, packet)
|
||||||
|
|
||||||
|
// Swap dest / src IPs and recalculate checksum
|
||||||
|
ipv4 := out[0:20]
|
||||||
|
copy(ipv4[12:16], packet[16:20])
|
||||||
|
copy(ipv4[16:20], packet[12:16])
|
||||||
|
ipv4[10] = 0
|
||||||
|
ipv4[11] = 0
|
||||||
|
binary.BigEndian.PutUint16(ipv4[10:], tcpipChecksum(ipv4, 0))
|
||||||
|
|
||||||
|
// Change type to ICMP Echo Reply and recalculate checksum
|
||||||
|
icmp := out[20:]
|
||||||
|
icmp[0] = 0
|
||||||
|
icmp[2] = 0
|
||||||
|
icmp[3] = 0
|
||||||
|
binary.BigEndian.PutUint16(icmp[2:], tcpipChecksum(icmp, 0))
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// tcpipChecksum computes the Internet checksum defined in RFC 1071 over
// data, starting from csum (any partial sum already accumulated, e.g. a
// pseudo-header sum) and returning the one's-complement result.
//
// based on:
// - https://github.com/google/gopacket/blob/v1.1.19/layers/tcpip.go#L50-L70
func tcpipChecksum(data []byte, csum uint32) uint16 {
	// Sum 16-bit big-endian words. A trailing odd byte is treated as the
	// high byte of a final word padded with zero.
	// (Manual byte math here is measurably faster than
	// binary.BigEndian.Uint16 on this hot path.)
	n := len(data)
	for i := 0; i+1 < n; i += 2 {
		csum += uint32(data[i])<<8 | uint32(data[i+1])
	}
	if n%2 == 1 {
		csum += uint32(data[n-1]) << 8
	}
	// Fold the carries back into the low 16 bits.
	for csum>>16 != 0 {
		csum = (csum >> 16) + (csum & 0xffff)
	}
	return ^uint16(csum)
}
|
||||||
|
|
||||||
|
// ipv4PseudoheaderChecksum accumulates the partial RFC 1071 sum of the
// TCP/UDP pseudo header: the 4-byte src and dst addresses, the protocol
// number, and the segment length.
//
// based on:
// - https://github.com/google/gopacket/blob/v1.1.19/layers/tcpip.go#L26-L35
func ipv4PseudoheaderChecksum(src, dst []byte, proto, length uint32) (csum uint32) {
	// Each address contributes two 16-bit words.
	for _, addr := range [][]byte{src, dst} {
		csum += (uint32(addr[0]) + uint32(addr[2])) << 8
		csum += uint32(addr[1]) + uint32(addr[3])
	}
	csum += proto
	csum += length & 0xffff
	csum += length >> 16
	return csum
}
|
||||||
371
lighthouse.go
371
lighthouse.go
@@ -6,13 +6,14 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
|
"net/netip"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
@@ -33,6 +34,7 @@ type netIpAndPort struct {
|
|||||||
type LightHouse struct {
|
type LightHouse struct {
|
||||||
//TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
|
//TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
|
||||||
sync.RWMutex //Because we concurrently read and write to our maps
|
sync.RWMutex //Because we concurrently read and write to our maps
|
||||||
|
ctx context.Context
|
||||||
amLighthouse bool
|
amLighthouse bool
|
||||||
myVpnIp iputil.VpnIp
|
myVpnIp iputil.VpnIp
|
||||||
myVpnZeros iputil.VpnIp
|
myVpnZeros iputil.VpnIp
|
||||||
@@ -49,29 +51,31 @@ type LightHouse struct {
|
|||||||
// respond with.
|
// respond with.
|
||||||
// - When we are not a lighthouse, this filters which addresses we accept
|
// - When we are not a lighthouse, this filters which addresses we accept
|
||||||
// from lighthouses.
|
// from lighthouses.
|
||||||
atomicRemoteAllowList *RemoteAllowList
|
remoteAllowList atomic.Pointer[RemoteAllowList]
|
||||||
|
|
||||||
// filters local addresses that we advertise to lighthouses
|
// filters local addresses that we advertise to lighthouses
|
||||||
atomicLocalAllowList *LocalAllowList
|
localAllowList atomic.Pointer[LocalAllowList]
|
||||||
|
|
||||||
// used to trigger the HandshakeManager when we receive HostQueryReply
|
// used to trigger the HandshakeManager when we receive HostQueryReply
|
||||||
handshakeTrigger chan<- iputil.VpnIp
|
handshakeTrigger chan<- iputil.VpnIp
|
||||||
|
|
||||||
// atomicStaticList exists to avoid having a bool in each addrMap entry
|
// staticList exists to avoid having a bool in each addrMap entry
|
||||||
// since static should be rare
|
// since static should be rare
|
||||||
atomicStaticList map[iputil.VpnIp]struct{}
|
staticList atomic.Pointer[map[iputil.VpnIp]struct{}]
|
||||||
atomicLighthouses map[iputil.VpnIp]struct{}
|
lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]
|
||||||
|
|
||||||
atomicInterval int64
|
interval atomic.Int64
|
||||||
updateCancel context.CancelFunc
|
updateCancel context.CancelFunc
|
||||||
updateParentCtx context.Context
|
updateParentCtx context.Context
|
||||||
updateUdp udp.EncWriter
|
updateUdp EncWriter
|
||||||
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
|
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
|
||||||
|
|
||||||
atomicAdvertiseAddrs []netIpAndPort
|
advertiseAddrs atomic.Pointer[[]netIpAndPort]
|
||||||
|
|
||||||
// IP's of relays that can be used by peers to access me
|
// IP's of relays that can be used by peers to access me
|
||||||
atomicRelaysForMe []iputil.VpnIp
|
relaysForMe atomic.Pointer[[]iputil.VpnIp]
|
||||||
|
|
||||||
|
calculatedRemotes atomic.Pointer[cidr.Tree4] // Maps VpnIp to []*calculatedRemote
|
||||||
|
|
||||||
metrics *MessageMetrics
|
metrics *MessageMetrics
|
||||||
metricHolepunchTx metrics.Counter
|
metricHolepunchTx metrics.Counter
|
||||||
@@ -80,7 +84,7 @@ type LightHouse struct {
|
|||||||
|
|
||||||
// NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
|
// NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
|
||||||
// addrMap should be nil unless this is during a config reload
|
// addrMap should be nil unless this is during a config reload
|
||||||
func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
|
func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
|
||||||
amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
|
amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
|
||||||
nebulaPort := uint32(c.GetInt("listen.port", 0))
|
nebulaPort := uint32(c.GetInt("listen.port", 0))
|
||||||
if amLighthouse && nebulaPort == 0 {
|
if amLighthouse && nebulaPort == 0 {
|
||||||
@@ -98,18 +102,21 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet,
|
|||||||
|
|
||||||
ones, _ := myVpnNet.Mask.Size()
|
ones, _ := myVpnNet.Mask.Size()
|
||||||
h := LightHouse{
|
h := LightHouse{
|
||||||
amLighthouse: amLighthouse,
|
ctx: ctx,
|
||||||
myVpnIp: iputil.Ip2VpnIp(myVpnNet.IP),
|
amLighthouse: amLighthouse,
|
||||||
myVpnZeros: iputil.VpnIp(32 - ones),
|
myVpnIp: iputil.Ip2VpnIp(myVpnNet.IP),
|
||||||
myVpnNet: myVpnNet,
|
myVpnZeros: iputil.VpnIp(32 - ones),
|
||||||
addrMap: make(map[iputil.VpnIp]*RemoteList),
|
myVpnNet: myVpnNet,
|
||||||
nebulaPort: nebulaPort,
|
addrMap: make(map[iputil.VpnIp]*RemoteList),
|
||||||
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
|
nebulaPort: nebulaPort,
|
||||||
atomicStaticList: make(map[iputil.VpnIp]struct{}),
|
punchConn: pc,
|
||||||
punchConn: pc,
|
punchy: p,
|
||||||
punchy: p,
|
l: l,
|
||||||
l: l,
|
|
||||||
}
|
}
|
||||||
|
lighthouses := make(map[iputil.VpnIp]struct{})
|
||||||
|
h.lighthouses.Store(&lighthouses)
|
||||||
|
staticList := make(map[iputil.VpnIp]struct{})
|
||||||
|
h.staticList.Store(&staticList)
|
||||||
|
|
||||||
if c.GetBool("stats.lighthouse_metrics", false) {
|
if c.GetBool("stats.lighthouse_metrics", false) {
|
||||||
h.metrics = newLighthouseMetrics()
|
h.metrics = newLighthouseMetrics()
|
||||||
@@ -137,31 +144,35 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
|
func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
|
||||||
return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList))))
|
return *lh.staticList.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
|
func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
|
||||||
return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses))))
|
return *lh.lighthouses.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
|
func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
|
||||||
return (*RemoteAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList))))
|
return lh.remoteAllowList.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
|
func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
|
||||||
return (*LocalAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList))))
|
return lh.localAllowList.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort {
|
func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort {
|
||||||
return *(*[]netIpAndPort)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs))))
|
return *lh.advertiseAddrs.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
|
func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
|
||||||
return *(*[]iputil.VpnIp)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe))))
|
return *lh.relaysForMe.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4 {
|
||||||
|
return lh.calculatedRemotes.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) GetUpdateInterval() int64 {
|
func (lh *LightHouse) GetUpdateInterval() int64 {
|
||||||
return atomic.LoadInt64(&lh.atomicInterval)
|
return lh.interval.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
||||||
@@ -188,7 +199,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
|||||||
advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort})
|
advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort})
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)), unsafe.Pointer(&advAddrs))
|
lh.advertiseAddrs.Store(&advAddrs)
|
||||||
|
|
||||||
if !initial {
|
if !initial {
|
||||||
lh.l.Info("lighthouse.advertise_addrs has changed")
|
lh.l.Info("lighthouse.advertise_addrs has changed")
|
||||||
@@ -196,10 +207,10 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if initial || c.HasChanged("lighthouse.interval") {
|
if initial || c.HasChanged("lighthouse.interval") {
|
||||||
atomic.StoreInt64(&lh.atomicInterval, int64(c.GetInt("lighthouse.interval", 10)))
|
lh.interval.Store(int64(c.GetInt("lighthouse.interval", 10)))
|
||||||
|
|
||||||
if !initial {
|
if !initial {
|
||||||
lh.l.Infof("lighthouse.interval changed to %v", lh.atomicInterval)
|
lh.l.Infof("lighthouse.interval changed to %v", lh.interval.Load())
|
||||||
|
|
||||||
if lh.updateCancel != nil {
|
if lh.updateCancel != nil {
|
||||||
// May not always have a running routine
|
// May not always have a running routine
|
||||||
@@ -216,7 +227,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
|||||||
return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
|
return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)), unsafe.Pointer(ral))
|
lh.remoteAllowList.Store(ral)
|
||||||
if !initial {
|
if !initial {
|
||||||
//TODO: a diff will be annoyingly difficult
|
//TODO: a diff will be annoyingly difficult
|
||||||
lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed")
|
lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed")
|
||||||
@@ -229,27 +240,62 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
|||||||
return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
|
return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)), unsafe.Pointer(lal))
|
lh.localAllowList.Store(lal)
|
||||||
if !initial {
|
if !initial {
|
||||||
//TODO: a diff will be annoyingly difficult
|
//TODO: a diff will be annoyingly difficult
|
||||||
lh.l.Info("lighthouse.local_allow_list has changed")
|
lh.l.Info("lighthouse.local_allow_list has changed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if initial || c.HasChanged("lighthouse.calculated_remotes") {
|
||||||
|
cr, err := NewCalculatedRemotesFromConfig(c, "lighthouse.calculated_remotes")
|
||||||
|
if err != nil {
|
||||||
|
return util.NewContextualError("Invalid lighthouse.calculated_remotes", nil, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lh.calculatedRemotes.Store(cr)
|
||||||
|
if !initial {
|
||||||
|
//TODO: a diff will be annoyingly difficult
|
||||||
|
lh.l.Info("lighthouse.calculated_remotes has changed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
|
//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
|
||||||
if initial || c.HasChanged("static_host_map") {
|
if initial || c.HasChanged("static_host_map") || c.HasChanged("static_map.cadence") || c.HasChanged("static_map.network") || c.HasChanged("static_map.lookup_timeout") {
|
||||||
|
// Clean up. Entries still in the static_host_map will be re-built.
|
||||||
|
// Entries no longer present must have their (possible) background DNS goroutines stopped.
|
||||||
|
if existingStaticList := lh.staticList.Load(); existingStaticList != nil {
|
||||||
|
lh.RLock()
|
||||||
|
for staticVpnIp := range *existingStaticList {
|
||||||
|
if am, ok := lh.addrMap[staticVpnIp]; ok && am != nil {
|
||||||
|
am.hr.Cancel()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lh.RUnlock()
|
||||||
|
}
|
||||||
|
// Build a new list based on current config.
|
||||||
staticList := make(map[iputil.VpnIp]struct{})
|
staticList := make(map[iputil.VpnIp]struct{})
|
||||||
err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
|
err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)), unsafe.Pointer(&staticList))
|
lh.staticList.Store(&staticList)
|
||||||
if !initial {
|
if !initial {
|
||||||
//TODO: we should remove any remote list entries for static hosts that were removed/modified?
|
//TODO: we should remove any remote list entries for static hosts that were removed/modified?
|
||||||
lh.l.Info("static_host_map has changed")
|
if c.HasChanged("static_host_map") {
|
||||||
|
lh.l.Info("static_host_map has changed")
|
||||||
|
}
|
||||||
|
if c.HasChanged("static_map.cadence") {
|
||||||
|
lh.l.Info("static_map.cadence has changed")
|
||||||
|
}
|
||||||
|
if c.HasChanged("static_map.network") {
|
||||||
|
lh.l.Info("static_map.network has changed")
|
||||||
|
}
|
||||||
|
if c.HasChanged("static_map.lookup_timeout") {
|
||||||
|
lh.l.Info("static_map.lookup_timeout has changed")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if initial || c.HasChanged("lighthouse.hosts") {
|
if initial || c.HasChanged("lighthouse.hosts") {
|
||||||
@@ -259,7 +305,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)), unsafe.Pointer(&lhMap))
|
lh.lighthouses.Store(&lhMap)
|
||||||
if !initial {
|
if !initial {
|
||||||
//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
|
//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
|
||||||
lh.l.Info("lighthouse.hosts has changed")
|
lh.l.Info("lighthouse.hosts has changed")
|
||||||
@@ -274,18 +320,18 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
|||||||
lh.l.Info("Ignoring relays from config because am_relay is true")
|
lh.l.Info("Ignoring relays from config because am_relay is true")
|
||||||
}
|
}
|
||||||
relaysForMe := []iputil.VpnIp{}
|
relaysForMe := []iputil.VpnIp{}
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe))
|
lh.relaysForMe.Store(&relaysForMe)
|
||||||
case false:
|
case false:
|
||||||
relaysForMe := []iputil.VpnIp{}
|
relaysForMe := []iputil.VpnIp{}
|
||||||
for _, v := range c.GetStringSlice("relay.relays", nil) {
|
for _, v := range c.GetStringSlice("relay.relays", nil) {
|
||||||
lh.l.WithField("RelayIP", v).Info("Read relay from config")
|
lh.l.WithField("relay", v).Info("Read relay from config")
|
||||||
|
|
||||||
configRIP := net.ParseIP(v)
|
configRIP := net.ParseIP(v)
|
||||||
if configRIP != nil {
|
if configRIP != nil {
|
||||||
relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP))
|
relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe))
|
lh.relaysForMe.Store(&relaysForMe)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -323,7 +369,48 @@ func (lh *LightHouse) parseLighthouses(c *config.C, tunCidr *net.IPNet, lhMap ma
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getStaticMapCadence(c *config.C) (time.Duration, error) {
|
||||||
|
cadence := c.GetString("static_map.cadence", "30s")
|
||||||
|
d, err := time.ParseDuration(cadence)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getStaticMapLookupTimeout(c *config.C) (time.Duration, error) {
|
||||||
|
lookupTimeout := c.GetString("static_map.lookup_timeout", "250ms")
|
||||||
|
d, err := time.ParseDuration(lookupTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getStaticMapNetwork(c *config.C) (string, error) {
|
||||||
|
network := c.GetString("static_map.network", "ip4")
|
||||||
|
if network != "ip" && network != "ip4" && network != "ip6" {
|
||||||
|
return "", fmt.Errorf("static_map.network must be one of ip, ip4, or ip6")
|
||||||
|
}
|
||||||
|
return network, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
|
func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
|
||||||
|
d, err := getStaticMapCadence(c)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
network, err := getStaticMapNetwork(c)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
lookup_timeout, err := getStaticMapLookupTimeout(c)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
|
shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
|
||||||
i := 0
|
i := 0
|
||||||
|
|
||||||
@@ -339,21 +426,17 @@ func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList
|
|||||||
|
|
||||||
vpnIp := iputil.Ip2VpnIp(rip)
|
vpnIp := iputil.Ip2VpnIp(rip)
|
||||||
vals, ok := v.([]interface{})
|
vals, ok := v.([]interface{})
|
||||||
if ok {
|
if !ok {
|
||||||
for _, v := range vals {
|
vals = []interface{}{v}
|
||||||
ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
|
}
|
||||||
if err != nil {
|
remoteAddrs := []string{}
|
||||||
return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
|
for _, v := range vals {
|
||||||
}
|
remoteAddrs = append(remoteAddrs, fmt.Sprintf("%v", v))
|
||||||
lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
|
}
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
err := lh.addStaticRemotes(i, d, network, lookup_timeout, vpnIp, remoteAddrs, staticList)
|
||||||
ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
|
if err != nil {
|
||||||
if err != nil {
|
return err
|
||||||
return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
|
|
||||||
}
|
|
||||||
lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
|
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
@@ -361,7 +444,7 @@ func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) Query(ip iputil.VpnIp, f udp.EncWriter) *RemoteList {
|
func (lh *LightHouse) Query(ip iputil.VpnIp, f EncWriter) *RemoteList {
|
||||||
if !lh.IsLighthouseIP(ip) {
|
if !lh.IsLighthouseIP(ip) {
|
||||||
lh.QueryServer(ip, f)
|
lh.QueryServer(ip, f)
|
||||||
}
|
}
|
||||||
@@ -375,7 +458,7 @@ func (lh *LightHouse) Query(ip iputil.VpnIp, f udp.EncWriter) *RemoteList {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// This is asynchronous so no reply should be expected
|
// This is asynchronous so no reply should be expected
|
||||||
func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f udp.EncWriter) {
|
func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f EncWriter) {
|
||||||
if lh.amLighthouse {
|
if lh.amLighthouse {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -460,43 +543,123 @@ func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
|
|||||||
// AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
|
// AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
|
||||||
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
|
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
|
||||||
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
|
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
|
||||||
//NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
|
// NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
|
||||||
func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) {
|
func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, timeout time.Duration, vpnIp iputil.VpnIp, toAddrs []string, staticList map[iputil.VpnIp]struct{}) error {
|
||||||
|
lh.Lock()
|
||||||
|
am := lh.unlockedGetRemoteList(vpnIp)
|
||||||
|
am.Lock()
|
||||||
|
defer am.Unlock()
|
||||||
|
ctx := lh.ctx
|
||||||
|
lh.Unlock()
|
||||||
|
|
||||||
|
hr, err := NewHostnameResults(ctx, lh.l, d, network, timeout, toAddrs, func() {
|
||||||
|
// This callback runs whenever the DNS hostname resolver finds a different set of IP's
|
||||||
|
// in its resolution for hostnames.
|
||||||
|
am.Lock()
|
||||||
|
defer am.Unlock()
|
||||||
|
am.shouldRebuild = true
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
|
||||||
|
}
|
||||||
|
am.unlockedSetHostnamesResults(hr)
|
||||||
|
|
||||||
|
for _, addrPort := range hr.GetIPs() {
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case addrPort.Addr().Is4():
|
||||||
|
to := NewIp4AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
|
||||||
|
if !lh.unlockedShouldAddV4(vpnIp, to) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
am.unlockedPrependV4(lh.myVpnIp, to)
|
||||||
|
case addrPort.Addr().Is6():
|
||||||
|
to := NewIp6AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
|
||||||
|
if !lh.unlockedShouldAddV6(vpnIp, to) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
am.unlockedPrependV6(lh.myVpnIp, to)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark it as static in the caller provided map
|
||||||
|
staticList[vpnIp] = struct{}{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// addCalculatedRemotes adds any calculated remotes based on the
|
||||||
|
// lighthouse.calculated_remotes configuration. It returns true if any
|
||||||
|
// calculated remotes were added
|
||||||
|
func (lh *LightHouse) addCalculatedRemotes(vpnIp iputil.VpnIp) bool {
|
||||||
|
tree := lh.getCalculatedRemotes()
|
||||||
|
if tree == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
value := tree.MostSpecificContains(vpnIp)
|
||||||
|
if value == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
calculatedRemotes := value.([]*calculatedRemote)
|
||||||
|
|
||||||
|
var calculated []*Ip4AndPort
|
||||||
|
for _, cr := range calculatedRemotes {
|
||||||
|
c := cr.Apply(vpnIp)
|
||||||
|
if c != nil {
|
||||||
|
calculated = append(calculated, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
lh.Lock()
|
lh.Lock()
|
||||||
am := lh.unlockedGetRemoteList(vpnIp)
|
am := lh.unlockedGetRemoteList(vpnIp)
|
||||||
am.Lock()
|
am.Lock()
|
||||||
defer am.Unlock()
|
defer am.Unlock()
|
||||||
lh.Unlock()
|
lh.Unlock()
|
||||||
|
|
||||||
if ipv4 := toAddr.IP.To4(); ipv4 != nil {
|
am.unlockedSetV4(lh.myVpnIp, vpnIp, calculated, lh.unlockedShouldAddV4)
|
||||||
to := NewIp4AndPort(ipv4, uint32(toAddr.Port))
|
|
||||||
if !lh.unlockedShouldAddV4(vpnIp, to) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
am.unlockedPrependV4(lh.myVpnIp, to)
|
|
||||||
|
|
||||||
} else {
|
return len(calculated) > 0
|
||||||
to := NewIp6AndPort(toAddr.IP, uint32(toAddr.Port))
|
|
||||||
if !lh.unlockedShouldAddV6(vpnIp, to) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
am.unlockedPrependV6(lh.myVpnIp, to)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mark it as static in the caller provided map
|
|
||||||
staticList[vpnIp] = struct{}{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// unlockedGetRemoteList assumes you have the lh lock
|
// unlockedGetRemoteList assumes you have the lh lock
|
||||||
func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
|
func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
|
||||||
am, ok := lh.addrMap[vpnIp]
|
am, ok := lh.addrMap[vpnIp]
|
||||||
if !ok {
|
if !ok {
|
||||||
am = NewRemoteList()
|
am = NewRemoteList(func(a netip.Addr) bool { return lh.shouldAdd(vpnIp, a) })
|
||||||
lh.addrMap[vpnIp] = am
|
lh.addrMap[vpnIp] = am
|
||||||
}
|
}
|
||||||
return am
|
return am
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) shouldAdd(vpnIp iputil.VpnIp, to netip.Addr) bool {
|
||||||
|
switch {
|
||||||
|
case to.Is4():
|
||||||
|
ipBytes := to.As4()
|
||||||
|
ip := iputil.Ip2VpnIp(ipBytes[:])
|
||||||
|
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, ip)
|
||||||
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
|
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
|
}
|
||||||
|
if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, ip) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case to.Is6():
|
||||||
|
ipBytes := to.As16()
|
||||||
|
|
||||||
|
hi := binary.BigEndian.Uint64(ipBytes[:8])
|
||||||
|
lo := binary.BigEndian.Uint64(ipBytes[8:])
|
||||||
|
allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, hi, lo)
|
||||||
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
|
lh.l.WithField("remoteIp", to).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't check our vpn network here because nebula does not support ipv6 on the inside
|
||||||
|
if !allow {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// unlockedShouldAddV4 checks if to is allowed by our allow list
|
// unlockedShouldAddV4 checks if to is allowed by our allow list
|
||||||
func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
|
func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
|
||||||
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
|
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
|
||||||
@@ -555,6 +718,14 @@ func NewIp4AndPort(ip net.IP, port uint32) *Ip4AndPort {
|
|||||||
return &ipp
|
return &ipp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewIp4AndPortFromNetIP(ip netip.Addr, port uint16) *Ip4AndPort {
|
||||||
|
v4Addr := ip.As4()
|
||||||
|
return &Ip4AndPort{
|
||||||
|
Ip: binary.BigEndian.Uint32(v4Addr[:]),
|
||||||
|
Port: uint32(port),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
|
func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
|
||||||
return &Ip6AndPort{
|
return &Ip6AndPort{
|
||||||
Hi: binary.BigEndian.Uint64(ip[:8]),
|
Hi: binary.BigEndian.Uint64(ip[:8]),
|
||||||
@@ -563,6 +734,14 @@ func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewIp6AndPortFromNetIP(ip netip.Addr, port uint16) *Ip6AndPort {
|
||||||
|
ip6Addr := ip.As16()
|
||||||
|
return &Ip6AndPort{
|
||||||
|
Hi: binary.BigEndian.Uint64(ip6Addr[:8]),
|
||||||
|
Lo: binary.BigEndian.Uint64(ip6Addr[8:]),
|
||||||
|
Port: uint32(port),
|
||||||
|
}
|
||||||
|
}
|
||||||
func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr {
|
func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr {
|
||||||
ip := ipp.Ip
|
ip := ipp.Ip
|
||||||
return udp.NewAddr(
|
return udp.NewAddr(
|
||||||
@@ -575,7 +754,7 @@ func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
|
|||||||
return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
|
return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) {
|
func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f EncWriter) {
|
||||||
lh.updateParentCtx = ctx
|
lh.updateParentCtx = ctx
|
||||||
lh.updateUdp = f
|
lh.updateUdp = f
|
||||||
|
|
||||||
@@ -601,7 +780,7 @@ func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
|
func (lh *LightHouse) SendUpdate(f EncWriter) {
|
||||||
var v4 []*Ip4AndPort
|
var v4 []*Ip4AndPort
|
||||||
var v6 []*Ip6AndPort
|
var v6 []*Ip6AndPort
|
||||||
|
|
||||||
@@ -706,7 +885,13 @@ func (lhh *LightHouseHandler) resetMeta() *NebulaMeta {
|
|||||||
return lhh.meta
|
return lhh.meta
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w udp.EncWriter) {
|
func lhHandleRequest(lhh *LightHouseHandler, f *Interface) udp.LightHouseHandlerFunc {
|
||||||
|
return func(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte) {
|
||||||
|
lhh.HandleRequest(rAddr, vpnIp, p, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w EncWriter) {
|
||||||
n := lhh.resetMeta()
|
n := lhh.resetMeta()
|
||||||
err := n.Unmarshal(p)
|
err := n.Unmarshal(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -733,15 +918,18 @@ func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp,
|
|||||||
lhh.handleHostQueryReply(n, vpnIp)
|
lhh.handleHostQueryReply(n, vpnIp)
|
||||||
|
|
||||||
case NebulaMeta_HostUpdateNotification:
|
case NebulaMeta_HostUpdateNotification:
|
||||||
lhh.handleHostUpdateNotification(n, vpnIp)
|
lhh.handleHostUpdateNotification(n, vpnIp, w)
|
||||||
|
|
||||||
case NebulaMeta_HostMovedNotification:
|
case NebulaMeta_HostMovedNotification:
|
||||||
case NebulaMeta_HostPunchNotification:
|
case NebulaMeta_HostPunchNotification:
|
||||||
lhh.handleHostPunchNotification(n, vpnIp, w)
|
lhh.handleHostPunchNotification(n, vpnIp, w)
|
||||||
|
|
||||||
|
case NebulaMeta_HostUpdateNotificationAck:
|
||||||
|
// noop
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w udp.EncWriter) {
|
func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w EncWriter) {
|
||||||
// Exit if we don't answer queries
|
// Exit if we don't answer queries
|
||||||
if !lhh.lh.amLighthouse {
|
if !lhh.lh.amLighthouse {
|
||||||
if lhh.l.Level >= logrus.DebugLevel {
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
@@ -846,7 +1034,7 @@ func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp iputil.V
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp) {
|
func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
|
||||||
if !lhh.lh.amLighthouse {
|
if !lhh.lh.amLighthouse {
|
||||||
if lhh.l.Level >= logrus.DebugLevel {
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
|
lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
|
||||||
@@ -872,9 +1060,22 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp
|
|||||||
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
|
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
|
||||||
am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
|
am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
|
||||||
am.Unlock()
|
am.Unlock()
|
||||||
|
|
||||||
|
n = lhh.resetMeta()
|
||||||
|
n.Type = NebulaMeta_HostUpdateNotificationAck
|
||||||
|
n.Details.VpnIp = uint32(vpnIp)
|
||||||
|
ln, err := n.MarshalTo(lhh.pb)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host update ack")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lhh.lh.metricTx(NebulaMeta_HostUpdateNotificationAck, 1)
|
||||||
|
w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w udp.EncWriter) {
|
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
|
||||||
if !lhh.lh.IsLighthouseIP(vpnIp) {
|
if !lhh.lh.IsLighthouseIP(vpnIp) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -911,7 +1112,7 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp i
|
|||||||
if lhh.lh.punchy.GetRespond() {
|
if lhh.lh.punchy.GetRespond() {
|
||||||
queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
|
queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
|
||||||
go func() {
|
go func() {
|
||||||
time.Sleep(time.Second * 5)
|
time.Sleep(lhh.lh.punchy.GetRespondDelay())
|
||||||
if lhh.l.Level >= logrus.DebugLevel {
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
lhh.l.Debugf("Sending a nebula test packet to vpn ip %s", queryVpnIp)
|
lhh.l.Debugf("Sending a nebula test packet to vpn ip %s", queryVpnIp)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -11,6 +12,7 @@ import (
|
|||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
"github.com/slackhq/nebula/udp"
|
"github.com/slackhq/nebula/udp"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
//TODO: Add a test to ensure udpAddr is copied and not reused
|
//TODO: Add a test to ensure udpAddr is copied and not reused
|
||||||
@@ -53,14 +55,14 @@ func Test_lhStaticMapping(t *testing.T) {
|
|||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
|
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
|
||||||
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
|
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
|
||||||
_, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
|
_, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
lh2 := "10.128.0.3"
|
lh2 := "10.128.0.3"
|
||||||
c = config.NewC(l)
|
c = config.NewC(l)
|
||||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
|
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
|
||||||
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
|
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
|
||||||
_, err = NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
|
_, err = NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
|
||||||
assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
|
assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,14 +71,14 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
|||||||
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
|
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
|
||||||
|
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
lh, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
|
lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
|
||||||
if !assert.NoError(b, err) {
|
if !assert.NoError(b, err) {
|
||||||
b.Fatal()
|
b.Fatal()
|
||||||
}
|
}
|
||||||
|
|
||||||
hAddr := udp.NewAddrFromString("4.5.6.7:12345")
|
hAddr := udp.NewAddrFromString("4.5.6.7:12345")
|
||||||
hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
|
hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
|
||||||
lh.addrMap[3] = NewRemoteList()
|
lh.addrMap[3] = NewRemoteList(nil)
|
||||||
lh.addrMap[3].unlockedSetV4(
|
lh.addrMap[3].unlockedSetV4(
|
||||||
3,
|
3,
|
||||||
3,
|
3,
|
||||||
@@ -89,7 +91,7 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
|||||||
|
|
||||||
rAddr := udp.NewAddrFromString("1.2.2.3:12345")
|
rAddr := udp.NewAddrFromString("1.2.2.3:12345")
|
||||||
rAddr2 := udp.NewAddrFromString("1.2.2.3:12346")
|
rAddr2 := udp.NewAddrFromString("1.2.2.3:12346")
|
||||||
lh.addrMap[2] = NewRemoteList()
|
lh.addrMap[2] = NewRemoteList(nil)
|
||||||
lh.addrMap[2].unlockedSetV4(
|
lh.addrMap[2].unlockedSetV4(
|
||||||
3,
|
3,
|
||||||
3,
|
3,
|
||||||
@@ -162,7 +164,7 @@ func TestLighthouse_Memory(t *testing.T) {
|
|||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
||||||
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
||||||
lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
|
lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
lhh := lh.NewRequestHandler()
|
lhh := lh.NewRequestHandler()
|
||||||
|
|
||||||
@@ -238,11 +240,20 @@ func TestLighthouse_reload(t *testing.T) {
|
|||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
||||||
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
||||||
lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
|
lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}}
|
nc := map[interface{}]interface{}{
|
||||||
lh.reload(c, false)
|
"static_host_map": map[interface{}]interface{}{
|
||||||
|
"10.128.0.2": []interface{}{"1.1.1.1:4242"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rc, err := yaml.Marshal(nc)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
c.ReloadConfigString(string(rc))
|
||||||
|
|
||||||
|
err = lh.reload(c, false)
|
||||||
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
|
func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
|
||||||
@@ -372,11 +383,28 @@ type testEncWriter struct {
|
|||||||
metaFilter *NebulaMeta_MessageType
|
metaFilter *NebulaMeta_MessageType
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tw *testEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) {
|
func (tw *testEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
|
||||||
}
|
}
|
||||||
func (tw *testEncWriter) Handshake(vpnIp iputil.VpnIp) {
|
func (tw *testEncWriter) Handshake(vpnIp iputil.VpnIp) {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tw *testEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, _, _ []byte) {
|
||||||
|
msg := &NebulaMeta{}
|
||||||
|
err := msg.Unmarshal(p)
|
||||||
|
if tw.metaFilter == nil || msg.Type == *tw.metaFilter {
|
||||||
|
tw.lastReply = testLhReply{
|
||||||
|
nebType: t,
|
||||||
|
nebSubType: st,
|
||||||
|
vpnIp: hostinfo.vpnIp,
|
||||||
|
msg: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
|
func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
|
||||||
msg := &NebulaMeta{}
|
msg := &NebulaMeta{}
|
||||||
err := msg.Unmarshal(p)
|
err := msg.Unmarshal(p)
|
||||||
|
|||||||
34
main.go
34
main.go
@@ -151,8 +151,21 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
port := c.GetInt("listen.port", 0)
|
port := c.GetInt("listen.port", 0)
|
||||||
|
|
||||||
if !configTest {
|
if !configTest {
|
||||||
|
rawListenHost := c.GetString("listen.host", "0.0.0.0")
|
||||||
|
var listenHost *net.IPAddr
|
||||||
|
if rawListenHost == "[::]" {
|
||||||
|
// Old guidance was to provide the literal `[::]` in `listen.host` but that won't resolve.
|
||||||
|
listenHost = &net.IPAddr{IP: net.IPv6zero}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
listenHost, err = net.ResolveIPAddr("ip", rawListenHost)
|
||||||
|
if err != nil {
|
||||||
|
return nil, util.NewContextualError("Failed to resolve listen.host", nil, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for i := 0; i < routines; i++ {
|
for i := 0; i < routines; i++ {
|
||||||
udpServer, err := udp.NewListener(l, c.GetString("listen.host", "0.0.0.0"), port, routines > 1, c.GetInt("listen.batch", 64))
|
udpServer, err := udp.NewListener(l, listenHost.IP, port, routines > 1, c.GetInt("listen.batch", 64))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
|
return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
|
||||||
}
|
}
|
||||||
@@ -202,7 +215,10 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
|
hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
|
||||||
hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)
|
hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)
|
||||||
|
|
||||||
l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created")
|
l.
|
||||||
|
WithField("network", hostMap.vpnCIDR.String()).
|
||||||
|
WithField("preferredRanges", hostMap.preferredRanges).
|
||||||
|
Info("Main HostMap created")
|
||||||
|
|
||||||
/*
|
/*
|
||||||
config.SetDefault("promoter.interval", 10)
|
config.SetDefault("promoter.interval", 10)
|
||||||
@@ -210,12 +226,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
punchy := NewPunchyFromConfig(l, c)
|
punchy := NewPunchyFromConfig(l, c)
|
||||||
if punchy.GetPunch() && !configTest {
|
lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
|
||||||
l.Info("UDP hole punching enabled")
|
|
||||||
go hostMap.Punchy(ctx, udpConns[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
lightHouse, err := NewLightHouseFromConfig(l, c, tunCidr, udpConns[0], punchy)
|
|
||||||
switch {
|
switch {
|
||||||
case errors.As(err, &util.ContextualError{}):
|
case errors.As(err, &util.ContextualError{}):
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -269,8 +280,8 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
ServeDns: serveDns,
|
ServeDns: serveDns,
|
||||||
HandshakeManager: handshakeManager,
|
HandshakeManager: handshakeManager,
|
||||||
lightHouse: lightHouse,
|
lightHouse: lightHouse,
|
||||||
checkInterval: checkInterval,
|
checkInterval: time.Second * time.Duration(checkInterval),
|
||||||
pendingDeletionInterval: pendingDeletionInterval,
|
pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
|
||||||
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
|
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
|
||||||
DropMulticast: c.GetBool("tun.drop_multicast", false),
|
DropMulticast: c.GetBool("tun.drop_multicast", false),
|
||||||
routines: routines,
|
routines: routines,
|
||||||
@@ -279,6 +290,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
caPool: caPool,
|
caPool: caPool,
|
||||||
disconnectInvalid: c.GetBool("pki.disconnect_invalid", false),
|
disconnectInvalid: c.GetBool("pki.disconnect_invalid", false),
|
||||||
relayManager: NewRelayManager(ctx, l, hostMap, c),
|
relayManager: NewRelayManager(ctx, l, hostMap, c),
|
||||||
|
punchy: punchy,
|
||||||
|
|
||||||
ConntrackCacheTimeout: conntrackCacheTimeout,
|
ConntrackCacheTimeout: conntrackCacheTimeout,
|
||||||
l: l,
|
l: l,
|
||||||
@@ -327,7 +339,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
//TODO: check if we _should_ be emitting stats
|
//TODO: check if we _should_ be emitting stats
|
||||||
go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))
|
go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))
|
||||||
|
|
||||||
attachCommands(l, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
|
attachCommands(l, c, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
|
||||||
|
|
||||||
// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
|
// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
|
||||||
var dnsStart func()
|
var dnsStart func()
|
||||||
|
|||||||
@@ -84,6 +84,7 @@ func newLighthouseMetrics() *MessageMetrics {
|
|||||||
NebulaMeta_HostQueryReply,
|
NebulaMeta_HostQueryReply,
|
||||||
NebulaMeta_HostUpdateNotification,
|
NebulaMeta_HostUpdateNotification,
|
||||||
NebulaMeta_HostPunchNotification,
|
NebulaMeta_HostPunchNotification,
|
||||||
|
NebulaMeta_HostUpdateNotificationAck,
|
||||||
}
|
}
|
||||||
for _, i := range used {
|
for _, i := range used {
|
||||||
h[i] = []metrics.Counter{metrics.GetOrRegisterCounter(fmt.Sprintf("lighthouse.%s.%s", t, i.String()), nil)}
|
h[i] = []metrics.Counter{metrics.GetOrRegisterCounter(fmt.Sprintf("lighthouse.%s.%s", t, i.String()), nil)}
|
||||||
|
|||||||
154
nebula.pb.go
154
nebula.pb.go
@@ -25,42 +25,45 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
|||||||
type NebulaMeta_MessageType int32
|
type NebulaMeta_MessageType int32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
NebulaMeta_None NebulaMeta_MessageType = 0
|
NebulaMeta_None NebulaMeta_MessageType = 0
|
||||||
NebulaMeta_HostQuery NebulaMeta_MessageType = 1
|
NebulaMeta_HostQuery NebulaMeta_MessageType = 1
|
||||||
NebulaMeta_HostQueryReply NebulaMeta_MessageType = 2
|
NebulaMeta_HostQueryReply NebulaMeta_MessageType = 2
|
||||||
NebulaMeta_HostUpdateNotification NebulaMeta_MessageType = 3
|
NebulaMeta_HostUpdateNotification NebulaMeta_MessageType = 3
|
||||||
NebulaMeta_HostMovedNotification NebulaMeta_MessageType = 4
|
NebulaMeta_HostMovedNotification NebulaMeta_MessageType = 4
|
||||||
NebulaMeta_HostPunchNotification NebulaMeta_MessageType = 5
|
NebulaMeta_HostPunchNotification NebulaMeta_MessageType = 5
|
||||||
NebulaMeta_HostWhoami NebulaMeta_MessageType = 6
|
NebulaMeta_HostWhoami NebulaMeta_MessageType = 6
|
||||||
NebulaMeta_HostWhoamiReply NebulaMeta_MessageType = 7
|
NebulaMeta_HostWhoamiReply NebulaMeta_MessageType = 7
|
||||||
NebulaMeta_PathCheck NebulaMeta_MessageType = 8
|
NebulaMeta_PathCheck NebulaMeta_MessageType = 8
|
||||||
NebulaMeta_PathCheckReply NebulaMeta_MessageType = 9
|
NebulaMeta_PathCheckReply NebulaMeta_MessageType = 9
|
||||||
|
NebulaMeta_HostUpdateNotificationAck NebulaMeta_MessageType = 10
|
||||||
)
|
)
|
||||||
|
|
||||||
var NebulaMeta_MessageType_name = map[int32]string{
|
var NebulaMeta_MessageType_name = map[int32]string{
|
||||||
0: "None",
|
0: "None",
|
||||||
1: "HostQuery",
|
1: "HostQuery",
|
||||||
2: "HostQueryReply",
|
2: "HostQueryReply",
|
||||||
3: "HostUpdateNotification",
|
3: "HostUpdateNotification",
|
||||||
4: "HostMovedNotification",
|
4: "HostMovedNotification",
|
||||||
5: "HostPunchNotification",
|
5: "HostPunchNotification",
|
||||||
6: "HostWhoami",
|
6: "HostWhoami",
|
||||||
7: "HostWhoamiReply",
|
7: "HostWhoamiReply",
|
||||||
8: "PathCheck",
|
8: "PathCheck",
|
||||||
9: "PathCheckReply",
|
9: "PathCheckReply",
|
||||||
|
10: "HostUpdateNotificationAck",
|
||||||
}
|
}
|
||||||
|
|
||||||
var NebulaMeta_MessageType_value = map[string]int32{
|
var NebulaMeta_MessageType_value = map[string]int32{
|
||||||
"None": 0,
|
"None": 0,
|
||||||
"HostQuery": 1,
|
"HostQuery": 1,
|
||||||
"HostQueryReply": 2,
|
"HostQueryReply": 2,
|
||||||
"HostUpdateNotification": 3,
|
"HostUpdateNotification": 3,
|
||||||
"HostMovedNotification": 4,
|
"HostMovedNotification": 4,
|
||||||
"HostPunchNotification": 5,
|
"HostPunchNotification": 5,
|
||||||
"HostWhoami": 6,
|
"HostWhoami": 6,
|
||||||
"HostWhoamiReply": 7,
|
"HostWhoamiReply": 7,
|
||||||
"PathCheck": 8,
|
"PathCheck": 8,
|
||||||
"PathCheckReply": 9,
|
"PathCheckReply": 9,
|
||||||
|
"HostUpdateNotificationAck": 10,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x NebulaMeta_MessageType) String() string {
|
func (x NebulaMeta_MessageType) String() string {
|
||||||
@@ -637,51 +640,52 @@ func init() {
|
|||||||
func init() { proto.RegisterFile("nebula.proto", fileDescriptor_2d65afa7693df5ef) }
|
func init() { proto.RegisterFile("nebula.proto", fileDescriptor_2d65afa7693df5ef) }
|
||||||
|
|
||||||
var fileDescriptor_2d65afa7693df5ef = []byte{
|
var fileDescriptor_2d65afa7693df5ef = []byte{
|
||||||
// 696 bytes of a gzipped FileDescriptorProto
|
// 707 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xcd, 0x6e, 0xd3, 0x4a,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0x4d, 0x6f, 0xda, 0x4a,
|
||||||
0x14, 0x8e, 0x1d, 0xe7, 0xef, 0xa4, 0x49, 0x7d, 0x4f, 0xef, 0xcd, 0x4d, 0xaf, 0xae, 0xac, 0xe0,
|
0x14, 0xc5, 0xc6, 0x7c, 0x5d, 0x02, 0xf1, 0xbb, 0x79, 0x8f, 0x07, 0x4f, 0xaf, 0x16, 0xf5, 0xa2,
|
||||||
0x05, 0xca, 0x2a, 0xad, 0xd2, 0x52, 0xb1, 0x04, 0x82, 0x50, 0x52, 0xb5, 0x55, 0x18, 0x15, 0x90,
|
0x62, 0x45, 0x22, 0x92, 0x46, 0x5d, 0x36, 0xa5, 0xaa, 0x20, 0x4a, 0x22, 0x3a, 0x4a, 0x5b, 0xa9,
|
||||||
0xd8, 0xa0, 0x69, 0x32, 0xd4, 0x56, 0x12, 0x8f, 0x6b, 0x4f, 0x50, 0xf3, 0x16, 0x3c, 0x4c, 0x1f,
|
0x9b, 0x6a, 0x62, 0xa6, 0xc1, 0x02, 0x3c, 0x8e, 0x3d, 0x54, 0xe1, 0x5f, 0xf4, 0xc7, 0xe4, 0x47,
|
||||||
0x82, 0x05, 0x12, 0x5d, 0xb0, 0x60, 0x89, 0xda, 0x17, 0x41, 0x33, 0x76, 0x6c, 0x27, 0x0d, 0xec,
|
0x74, 0xd7, 0x2c, 0xbb, 0xac, 0x92, 0x65, 0x97, 0xfd, 0x03, 0xd5, 0x8c, 0xc1, 0x36, 0x84, 0x76,
|
||||||
0xce, 0xcf, 0xf7, 0xcd, 0x7c, 0xe7, 0x9b, 0x63, 0xc3, 0x96, 0xc7, 0x2e, 0xe6, 0x53, 0xda, 0xf1,
|
0x37, 0xe7, 0xde, 0x73, 0x66, 0xce, 0x9c, 0xb9, 0x36, 0x6c, 0x79, 0xec, 0x62, 0x36, 0xa1, 0x6d,
|
||||||
0x03, 0x2e, 0x38, 0x16, 0xa3, 0xcc, 0xfe, 0xaa, 0x03, 0x9c, 0xa9, 0xf0, 0x94, 0x09, 0x8a, 0x5d,
|
0x3f, 0xe0, 0x82, 0x63, 0x3e, 0x42, 0xf6, 0x0f, 0x1d, 0xe0, 0x4c, 0x2d, 0x4f, 0x99, 0xa0, 0xd8,
|
||||||
0x30, 0xce, 0x17, 0x3e, 0x6b, 0x6a, 0x2d, 0xad, 0x5d, 0xef, 0x5a, 0x9d, 0x98, 0x93, 0x22, 0x3a,
|
0x01, 0xe3, 0x7c, 0xee, 0xb3, 0xba, 0xd6, 0xd4, 0x5a, 0xd5, 0x8e, 0xd5, 0x5e, 0x68, 0x12, 0x46,
|
||||||
0xa7, 0x2c, 0x0c, 0xe9, 0x25, 0x93, 0x28, 0xa2, 0xb0, 0x78, 0x00, 0xa5, 0x97, 0x4c, 0x50, 0x77,
|
0xfb, 0x94, 0x85, 0x21, 0xbd, 0x64, 0x92, 0x45, 0x14, 0x17, 0xf7, 0xa1, 0xf0, 0x92, 0x09, 0xea,
|
||||||
0x1a, 0x36, 0xf5, 0x96, 0xd6, 0xae, 0x76, 0x77, 0x1f, 0xd2, 0x62, 0x00, 0x59, 0x22, 0xed, 0xef,
|
0x4e, 0xc2, 0xba, 0xde, 0xd4, 0x5a, 0xe5, 0x4e, 0xe3, 0xa1, 0x6c, 0x41, 0x20, 0x4b, 0xa6, 0xfd,
|
||||||
0x1a, 0x54, 0x33, 0x47, 0x61, 0x19, 0x8c, 0x33, 0xee, 0x31, 0x33, 0x87, 0x35, 0xa8, 0xf4, 0x79,
|
0x53, 0x83, 0x72, 0x6a, 0x2b, 0x2c, 0x82, 0x71, 0xc6, 0x3d, 0x66, 0x66, 0xb0, 0x02, 0xa5, 0x1e,
|
||||||
0x28, 0x5e, 0xcf, 0x59, 0xb0, 0x30, 0x35, 0x44, 0xa8, 0x27, 0x29, 0x61, 0xfe, 0x74, 0x61, 0xea,
|
0x0f, 0xc5, 0xeb, 0x19, 0x0b, 0xe6, 0xa6, 0x86, 0x08, 0xd5, 0x18, 0x12, 0xe6, 0x4f, 0xe6, 0xa6,
|
||||||
0xf8, 0x1f, 0x34, 0x64, 0xed, 0x8d, 0x3f, 0xa6, 0x82, 0x9d, 0x71, 0xe1, 0x7e, 0x74, 0x47, 0x54,
|
0x8e, 0xff, 0x41, 0x4d, 0xd6, 0xde, 0xf8, 0x43, 0x2a, 0xd8, 0x19, 0x17, 0xee, 0x47, 0xd7, 0xa1,
|
||||||
0xb8, 0xdc, 0x33, 0xf3, 0xb8, 0x0b, 0xff, 0xc8, 0xde, 0x29, 0xff, 0xc4, 0xc6, 0x2b, 0x2d, 0x63,
|
0xc2, 0xe5, 0x9e, 0x99, 0xc5, 0x06, 0xfc, 0x23, 0x7b, 0xa7, 0xfc, 0x13, 0x1b, 0xae, 0xb4, 0x8c,
|
||||||
0xd9, 0x1a, 0xce, 0xbd, 0x91, 0xb3, 0xd2, 0x2a, 0x60, 0x1d, 0x40, 0xb6, 0xde, 0x39, 0x9c, 0xce,
|
0x65, 0x6b, 0x30, 0xf3, 0x9c, 0xd1, 0x4a, 0x2b, 0x87, 0x55, 0x00, 0xd9, 0x7a, 0x37, 0xe2, 0x74,
|
||||||
0x5c, 0xb3, 0x88, 0x3b, 0xb0, 0x9d, 0xe6, 0xd1, 0xb5, 0x25, 0xa9, 0x6c, 0x48, 0x85, 0xd3, 0x73,
|
0xea, 0x9a, 0x79, 0xdc, 0x81, 0xed, 0x04, 0x47, 0xc7, 0x16, 0xa4, 0xb3, 0x01, 0x15, 0xa3, 0xee,
|
||||||
0xd8, 0x68, 0x62, 0x96, 0xa5, 0xb2, 0x24, 0x8d, 0x20, 0x15, 0xfb, 0x9b, 0x06, 0x7f, 0x3d, 0x98,
|
0x88, 0x39, 0x63, 0xb3, 0x28, 0x9d, 0xc5, 0x30, 0xa2, 0x94, 0xf0, 0x11, 0x34, 0x36, 0x3b, 0x3b,
|
||||||
0x1a, 0xff, 0x86, 0xc2, 0x5b, 0xdf, 0x1b, 0xf8, 0xca, 0xd6, 0x1a, 0x89, 0x12, 0x3c, 0x84, 0xea,
|
0x72, 0xc6, 0x26, 0xd8, 0x5f, 0x35, 0xf8, 0xeb, 0x41, 0x28, 0xf8, 0x37, 0xe4, 0xde, 0xfa, 0x5e,
|
||||||
0xc0, 0x3f, 0x7c, 0xee, 0x8d, 0x87, 0x3c, 0x10, 0xd2, 0xbb, 0x7c, 0xbb, 0xda, 0xc5, 0xa5, 0x77,
|
0xdf, 0x57, 0xa9, 0x57, 0x48, 0x04, 0xf0, 0x00, 0xca, 0x7d, 0xff, 0xe0, 0xc8, 0x1b, 0x0e, 0x78,
|
||||||
0x69, 0x8b, 0x64, 0x61, 0x11, 0xeb, 0x28, 0x61, 0x19, 0xeb, 0xac, 0xa3, 0x0c, 0x2b, 0x81, 0xa1,
|
0x20, 0x64, 0xb4, 0xd9, 0x56, 0xb9, 0x83, 0xcb, 0x68, 0x93, 0x16, 0x49, 0xd3, 0x22, 0xd5, 0x61,
|
||||||
0x05, 0x40, 0xd8, 0x94, 0x2e, 0x22, 0x19, 0x85, 0x56, 0xbe, 0x5d, 0x23, 0x99, 0x0a, 0x36, 0xa1,
|
0xac, 0x32, 0xd6, 0x55, 0x87, 0x29, 0x55, 0x4c, 0x43, 0x0b, 0x80, 0xb0, 0x09, 0x9d, 0x47, 0x36,
|
||||||
0x34, 0xe2, 0x73, 0x4f, 0xb0, 0xa0, 0x99, 0x57, 0x1a, 0x97, 0xa9, 0xbd, 0x0f, 0x90, 0x5e, 0x8f,
|
0x72, 0xcd, 0x6c, 0xab, 0x42, 0x52, 0x15, 0xac, 0x43, 0xc1, 0xe1, 0x33, 0x4f, 0xb0, 0xa0, 0x9e,
|
||||||
0x75, 0xd0, 0x93, 0x31, 0xf4, 0x81, 0x8f, 0x08, 0x86, 0xac, 0xab, 0x87, 0xaf, 0x11, 0x15, 0xdb,
|
0x55, 0x1e, 0x97, 0xd0, 0xde, 0x03, 0x48, 0x8e, 0xc7, 0x2a, 0xe8, 0xf1, 0x35, 0xf4, 0xbe, 0x8f,
|
||||||
0xcf, 0x24, 0xe3, 0x28, 0xc3, 0xe8, 0xbb, 0x8a, 0x61, 0x10, 0xbd, 0xef, 0xca, 0xfc, 0x84, 0x2b,
|
0x08, 0x86, 0xac, 0xab, 0xb9, 0xa8, 0x10, 0xb5, 0xb6, 0x9f, 0x4b, 0xc5, 0x61, 0x4a, 0xd1, 0x73,
|
||||||
0xbc, 0x41, 0xf4, 0x13, 0x9e, 0x9c, 0x90, 0xcf, 0x9c, 0x70, 0xbd, 0xdc, 0xc9, 0xa1, 0xeb, 0x5d,
|
0x95, 0xc2, 0x20, 0x7a, 0xcf, 0x95, 0xf8, 0x84, 0x2b, 0xbe, 0x41, 0xf4, 0x13, 0x1e, 0xef, 0x90,
|
||||||
0xfe, 0x79, 0x27, 0x25, 0x62, 0xc3, 0x4e, 0x22, 0x18, 0xe7, 0xee, 0x8c, 0xc5, 0xf7, 0xa8, 0xd8,
|
0x4d, 0xed, 0x70, 0xbd, 0x1c, 0xd9, 0x81, 0xeb, 0x5d, 0xfe, 0x79, 0x64, 0x25, 0x63, 0xc3, 0xc8,
|
||||||
0xb6, 0x1f, 0x6c, 0x9c, 0x24, 0x9b, 0x39, 0xac, 0x40, 0x21, 0x7a, 0x3f, 0xcd, 0xfe, 0x00, 0xdb,
|
0x22, 0x18, 0xe7, 0xee, 0x94, 0x2d, 0xce, 0x51, 0x6b, 0xdb, 0x7e, 0x30, 0x90, 0x52, 0x6c, 0x66,
|
||||||
0xd1, 0xb9, 0x7d, 0xea, 0x8d, 0x43, 0x87, 0x4e, 0x18, 0x3e, 0x4d, 0xd7, 0x5b, 0x53, 0xeb, 0xbd,
|
0xb0, 0x04, 0xb9, 0xe8, 0x79, 0x35, 0xfb, 0x03, 0x6c, 0x47, 0xfb, 0xf6, 0xa8, 0x37, 0x0c, 0x47,
|
||||||
0xa6, 0x20, 0x41, 0xae, 0xef, 0xb8, 0x14, 0xd1, 0x9f, 0xd1, 0x91, 0x12, 0xb1, 0x45, 0x54, 0x6c,
|
0x74, 0xcc, 0xf0, 0x59, 0x32, 0xfd, 0x9a, 0x9a, 0xfe, 0x35, 0x07, 0x31, 0x73, 0xfd, 0x13, 0x90,
|
||||||
0xdf, 0x68, 0xd0, 0xd8, 0xcc, 0x93, 0xf0, 0x1e, 0x0b, 0x84, 0xba, 0x65, 0x8b, 0xa8, 0x18, 0x1f,
|
0x26, 0x7a, 0x53, 0xea, 0x28, 0x13, 0x5b, 0x44, 0xad, 0xed, 0x1b, 0x0d, 0x6a, 0x9b, 0x75, 0x92,
|
||||||
0x43, 0x7d, 0xe0, 0xb9, 0xc2, 0xa5, 0x82, 0x07, 0x03, 0x6f, 0xcc, 0xae, 0x63, 0xa7, 0xd7, 0xaa,
|
0xde, 0x65, 0x81, 0x50, 0xa7, 0x6c, 0x11, 0xb5, 0xc6, 0x27, 0x50, 0xed, 0x7b, 0xae, 0x70, 0xa9,
|
||||||
0x12, 0x47, 0x58, 0xe8, 0x73, 0x6f, 0xcc, 0x62, 0x5c, 0xe4, 0xe7, 0x5a, 0x15, 0x1b, 0x50, 0xec,
|
0xe0, 0x41, 0xdf, 0x1b, 0xb2, 0xeb, 0x45, 0xd2, 0x6b, 0x55, 0xc9, 0x23, 0x2c, 0xf4, 0xb9, 0x37,
|
||||||
0x71, 0x3e, 0x71, 0x59, 0xd3, 0x50, 0xce, 0xc4, 0x59, 0xe2, 0x57, 0x21, 0xf5, 0xeb, 0xd8, 0x28,
|
0x64, 0x0b, 0x5e, 0x94, 0xe7, 0x5a, 0x15, 0x6b, 0x90, 0xef, 0x72, 0x3e, 0x76, 0x59, 0xdd, 0x50,
|
||||||
0x17, 0xcd, 0xd2, 0xb1, 0x51, 0x2e, 0x99, 0x65, 0xfb, 0x46, 0x87, 0x5a, 0x24, 0xbb, 0xc7, 0x3d,
|
0xc9, 0x2c, 0x50, 0x9c, 0x57, 0x2e, 0xc9, 0xeb, 0xd8, 0x28, 0xe6, 0xcd, 0xc2, 0xb1, 0x51, 0x2c,
|
||||||
0x11, 0xf0, 0x29, 0x3e, 0x59, 0x79, 0x95, 0x47, 0xab, 0x9e, 0xc4, 0xa0, 0x0d, 0x0f, 0xb3, 0x0f,
|
0x98, 0x45, 0xfb, 0x46, 0x87, 0x4a, 0x64, 0xbb, 0xcb, 0x3d, 0x11, 0xf0, 0x09, 0x3e, 0x5d, 0x79,
|
||||||
0x3b, 0x89, 0x74, 0xb5, 0x7f, 0xd9, 0xa9, 0x36, 0xb5, 0x24, 0x23, 0x19, 0x22, 0xc3, 0x88, 0xe6,
|
0x95, 0xc7, 0xab, 0x99, 0x2c, 0x48, 0x1b, 0x1e, 0x66, 0x0f, 0x76, 0x62, 0xeb, 0x6a, 0xfe, 0xd2,
|
||||||
0xdb, 0xd4, 0xc2, 0xff, 0xa1, 0xa2, 0xb2, 0x73, 0x3e, 0xf0, 0xd5, 0x9c, 0x35, 0x92, 0x16, 0xb0,
|
0xb7, 0xda, 0xd4, 0x92, 0x8a, 0xf8, 0x12, 0x29, 0x45, 0x74, 0xbf, 0x4d, 0x2d, 0xfc, 0x1f, 0x4a,
|
||||||
0x05, 0x55, 0x95, 0xbc, 0x0a, 0xf8, 0x4c, 0x7d, 0x0b, 0xb2, 0x9f, 0x2d, 0xd9, 0xfd, 0xdf, 0xfd,
|
0x0a, 0x9d, 0xf3, 0xbe, 0xaf, 0xee, 0x59, 0x21, 0x49, 0x01, 0x9b, 0x50, 0x56, 0xe0, 0x55, 0xc0,
|
||||||
0x9a, 0x1a, 0x80, 0xbd, 0x80, 0x51, 0xc1, 0x14, 0x9a, 0xb0, 0xab, 0x39, 0x0b, 0x85, 0xa9, 0xe1,
|
0xa7, 0xea, 0x5b, 0x90, 0xfd, 0x74, 0xc9, 0xee, 0xfd, 0xee, 0xcf, 0x55, 0x03, 0xec, 0x06, 0x8c,
|
||||||
0xbf, 0xb0, 0xb3, 0x52, 0x97, 0x92, 0x42, 0x66, 0xea, 0x2f, 0x0e, 0xbe, 0xdc, 0x59, 0xda, 0xed,
|
0x0a, 0xa6, 0xd8, 0x84, 0x5d, 0xcd, 0x58, 0x28, 0x4c, 0x0d, 0xff, 0x85, 0x9d, 0x95, 0xba, 0xb4,
|
||||||
0x9d, 0xa5, 0xfd, 0xbc, 0xb3, 0xb4, 0xcf, 0xf7, 0x56, 0xee, 0xf6, 0xde, 0xca, 0xfd, 0xb8, 0xb7,
|
0x14, 0x32, 0x53, 0x7f, 0xb1, 0xff, 0xe5, 0xce, 0xd2, 0x6e, 0xef, 0x2c, 0xed, 0xfb, 0x9d, 0xa5,
|
||||||
0x72, 0xef, 0x77, 0x2f, 0x5d, 0xe1, 0xcc, 0x2f, 0x3a, 0x23, 0x3e, 0xdb, 0x0b, 0xa7, 0x74, 0x34,
|
0x7d, 0xbe, 0xb7, 0x32, 0xb7, 0xf7, 0x56, 0xe6, 0xdb, 0xbd, 0x95, 0x79, 0xdf, 0xb8, 0x74, 0xc5,
|
||||||
0x71, 0xae, 0xf6, 0x22, 0x0b, 0x2f, 0x8a, 0xea, 0x0f, 0x7d, 0xf0, 0x2b, 0x00, 0x00, 0xff, 0xff,
|
0x68, 0x76, 0xd1, 0x76, 0xf8, 0x74, 0x37, 0x9c, 0x50, 0x67, 0x3c, 0xba, 0xda, 0x8d, 0x22, 0xbc,
|
||||||
0xcd, 0xd7, 0xbe, 0xd5, 0xb1, 0x05, 0x00, 0x00,
|
0xc8, 0xab, 0x1f, 0xf8, 0xfe, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x17, 0x56, 0x28, 0x74, 0xd0,
|
||||||
|
0x05, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *NebulaMeta) Marshal() (dAtA []byte, err error) {
|
func (m *NebulaMeta) Marshal() (dAtA []byte, err error) {
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ message NebulaMeta {
|
|||||||
HostWhoamiReply = 7;
|
HostWhoamiReply = 7;
|
||||||
PathCheck = 8;
|
PathCheck = 8;
|
||||||
PathCheckReply = 9;
|
PathCheckReply = 9;
|
||||||
|
HostUpdateNotificationAck = 10;
|
||||||
}
|
}
|
||||||
|
|
||||||
MessageType Type = 1;
|
MessageType Type = 1;
|
||||||
|
|||||||
80
noiseutil/boring.go
Normal file
80
noiseutil/boring.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
//go:build boringcrypto
|
||||||
|
// +build boringcrypto
|
||||||
|
|
||||||
|
package noiseutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/aes"
|
||||||
|
"crypto/cipher"
|
||||||
|
"encoding/binary"
|
||||||
|
|
||||||
|
// unsafe needed for go:linkname
|
||||||
|
_ "unsafe"
|
||||||
|
|
||||||
|
"github.com/flynn/noise"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncryptLockNeeded indicates if calls to Encrypt need a lock
|
||||||
|
// This is true for boringcrypto because the Seal function verifies that the
|
||||||
|
// nonce is strictly increasing.
|
||||||
|
const EncryptLockNeeded = true
|
||||||
|
|
||||||
|
// NewGCMTLS is no longer exposed in go1.19+, so we need to link it in
|
||||||
|
// See: https://github.com/golang/go/issues/56326
|
||||||
|
//
|
||||||
|
// NewGCMTLS is the internal method used with boringcrypto that provices a
|
||||||
|
// validated mode of AES-GCM which enforces the nonce is strictly
|
||||||
|
// monotonically increasing. This is the TLS 1.2 specification for nonce
|
||||||
|
// generation (which also matches the method used by the Noise Protocol)
|
||||||
|
//
|
||||||
|
// - https://github.com/golang/go/blob/go1.19/src/crypto/tls/cipher_suites.go#L520-L522
|
||||||
|
// - https://github.com/golang/go/blob/go1.19/src/crypto/internal/boring/aes.go#L235-L237
|
||||||
|
// - https://github.com/golang/go/blob/go1.19/src/crypto/internal/boring/aes.go#L250
|
||||||
|
// - https://github.com/google/boringssl/blob/ae223d6138807a13006342edfeef32e813246b39/include/openssl/aead.h#L379-L381
|
||||||
|
// - https://github.com/google/boringssl/blob/ae223d6138807a13006342edfeef32e813246b39/crypto/fipsmodule/cipher/e_aes.c#L1082-L1093
|
||||||
|
//
|
||||||
|
//go:linkname newGCMTLS crypto/internal/boring.NewGCMTLS
|
||||||
|
func newGCMTLS(c cipher.Block) (cipher.AEAD, error)
|
||||||
|
|
||||||
|
type cipherFn struct {
|
||||||
|
fn func([32]byte) noise.Cipher
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c cipherFn) Cipher(k [32]byte) noise.Cipher { return c.fn(k) }
|
||||||
|
func (c cipherFn) CipherName() string { return c.name }
|
||||||
|
|
||||||
|
// CipherAESGCM is the AES256-GCM AEAD cipher (using NewGCMTLS when GoBoring is present)
|
||||||
|
var CipherAESGCM noise.CipherFunc = cipherFn{cipherAESGCMBoring, "AESGCM"}
|
||||||
|
|
||||||
|
func cipherAESGCMBoring(k [32]byte) noise.Cipher {
|
||||||
|
c, err := aes.NewCipher(k[:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
gcm, err := newGCMTLS(c)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return aeadCipher{
|
||||||
|
gcm,
|
||||||
|
func(n uint64) []byte {
|
||||||
|
var nonce [12]byte
|
||||||
|
binary.BigEndian.PutUint64(nonce[4:], n)
|
||||||
|
return nonce[:]
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type aeadCipher struct {
|
||||||
|
cipher.AEAD
|
||||||
|
nonce func(uint64) []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c aeadCipher) Encrypt(out []byte, n uint64, ad, plaintext []byte) []byte {
|
||||||
|
return c.Seal(out, c.nonce(n), plaintext, ad)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c aeadCipher) Decrypt(out []byte, n uint64, ad, ciphertext []byte) ([]byte, error) {
|
||||||
|
return c.Open(out, c.nonce(n), ciphertext, ad)
|
||||||
|
}
|
||||||
46
noiseutil/boring_test.go
Normal file
46
noiseutil/boring_test.go
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
//go:build boringcrypto
|
||||||
|
// +build boringcrypto
|
||||||
|
|
||||||
|
package noiseutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/boring"
|
||||||
|
"encoding/hex"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEncryptLockNeeded(t *testing.T) {
|
||||||
|
assert.True(t, EncryptLockNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure NewGCMTLS validates the nonce is non-repeating
|
||||||
|
func TestNewGCMTLS(t *testing.T) {
|
||||||
|
assert.True(t, boring.Enabled())
|
||||||
|
|
||||||
|
// Test Case 16 from GCM Spec:
|
||||||
|
// - (now dead link): http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-spec.pdf
|
||||||
|
// - as listed in boringssl tests: https://github.com/google/boringssl/blob/fips-20220613/crypto/cipher_extra/test/cipher_tests.txt#L412-L418
|
||||||
|
key, _ := hex.DecodeString("feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308")
|
||||||
|
iv, _ := hex.DecodeString("cafebabefacedbaddecaf888")
|
||||||
|
plaintext, _ := hex.DecodeString("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39")
|
||||||
|
aad, _ := hex.DecodeString("feedfacedeadbeeffeedfacedeadbeefabaddad2")
|
||||||
|
expected, _ := hex.DecodeString("522dc1f099567d07f47f37a32a84427d643a8cdcbfe5c0c97598a2bd2555d1aa8cb08e48590dbb3da7b08b1056828838c5f61e6393ba7a0abcc9f662")
|
||||||
|
expectedTag, _ := hex.DecodeString("76fc6ece0f4e1768cddf8853bb2d551b")
|
||||||
|
|
||||||
|
expected = append(expected, expectedTag...)
|
||||||
|
|
||||||
|
var keyArray [32]byte
|
||||||
|
copy(keyArray[:], key)
|
||||||
|
c := CipherAESGCM.Cipher(keyArray)
|
||||||
|
aead := c.(aeadCipher).AEAD
|
||||||
|
|
||||||
|
dst := aead.Seal([]byte{}, iv, plaintext, aad)
|
||||||
|
assert.Equal(t, expected, dst)
|
||||||
|
|
||||||
|
// We expect this to fail since we are re-encrypting with a repeat IV
|
||||||
|
assert.PanicsWithError(t, "boringcrypto: EVP_AEAD_CTX_seal failed", func() {
|
||||||
|
dst = aead.Seal([]byte{}, iv, plaintext, aad)
|
||||||
|
})
|
||||||
|
}
|
||||||
68
noiseutil/nist.go
Normal file
68
noiseutil/nist.go
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
package noiseutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/ecdh"
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/flynn/noise"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DHP256 is the NIST P-256 ECDH function
|
||||||
|
var DHP256 noise.DHFunc = newNISTCurve("P256", ecdh.P256(), 32)
|
||||||
|
|
||||||
|
type nistCurve struct {
|
||||||
|
name string
|
||||||
|
curve ecdh.Curve
|
||||||
|
dhLen int
|
||||||
|
pubLen int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newNISTCurve(name string, curve ecdh.Curve, byteLen int) nistCurve {
|
||||||
|
return nistCurve{
|
||||||
|
name: name,
|
||||||
|
curve: curve,
|
||||||
|
dhLen: byteLen,
|
||||||
|
// Standard uncompressed format, type (1 byte) plus both coordinates
|
||||||
|
pubLen: 1 + 2*byteLen,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c nistCurve) GenerateKeypair(rng io.Reader) (noise.DHKey, error) {
|
||||||
|
if rng == nil {
|
||||||
|
rng = rand.Reader
|
||||||
|
}
|
||||||
|
privkey, err := c.curve.GenerateKey(rng)
|
||||||
|
if err != nil {
|
||||||
|
return noise.DHKey{}, err
|
||||||
|
}
|
||||||
|
pubkey := privkey.PublicKey()
|
||||||
|
return noise.DHKey{Private: privkey.Bytes(), Public: pubkey.Bytes()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c nistCurve) DH(privkey, pubkey []byte) ([]byte, error) {
|
||||||
|
ecdhPubKey, err := c.curve.NewPublicKey(pubkey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to unmarshal pubkey: %w", err)
|
||||||
|
}
|
||||||
|
ecdhPrivKey, err := c.curve.NewPrivateKey(privkey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to unmarshal pubkey: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ecdhPrivKey.ECDH(ecdhPubKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c nistCurve) DHLen() int {
|
||||||
|
// NOTE: Noise Protocol specifies "DHLen" to represent two things:
|
||||||
|
// - The size of the public key
|
||||||
|
// - The return size of the DH() function
|
||||||
|
// But for standard NIST ECDH, the sizes of these are different.
|
||||||
|
// Luckily, the flynn/noise library actually only uses this DHLen()
|
||||||
|
// value to represent the public key size, so that is what we are
|
||||||
|
// returning here. The length of the DH() return bytes are unaffected by
|
||||||
|
// this value here.
|
||||||
|
return c.pubLen
|
||||||
|
}
|
||||||
|
func (c nistCurve) DHName() string { return c.name }
|
||||||
14
noiseutil/notboring.go
Normal file
14
noiseutil/notboring.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
//go:build !boringcrypto
|
||||||
|
// +build !boringcrypto
|
||||||
|
|
||||||
|
package noiseutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/flynn/noise"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncryptLockNeeded indicates if calls to Encrypt need a lock
|
||||||
|
const EncryptLockNeeded = false
|
||||||
|
|
||||||
|
// CipherAESGCM is the standard noise.CipherAESGCM when boringcrypto is not enabled
|
||||||
|
var CipherAESGCM noise.CipherFunc = noise.CipherAESGCM
|
||||||
14
noiseutil/notboring_test.go
Normal file
14
noiseutil/notboring_test.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
//go:build !boringcrypto
|
||||||
|
// +build !boringcrypto
|
||||||
|
|
||||||
|
package noiseutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEncryptLockNeeded(t *testing.T) {
|
||||||
|
assert.False(t, EncryptLockNeeded)
|
||||||
|
}
|
||||||
6
notboring.go
Normal file
6
notboring.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
//go:build !boringcrypto
|
||||||
|
// +build !boringcrypto
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
var boringEnabled = func() bool { return false }
|
||||||
85
outside.go
85
outside.go
@@ -21,7 +21,23 @@ const (
|
|||||||
minFwPacketLen = 4
|
minFwPacketLen = 4
|
||||||
)
|
)
|
||||||
|
|
||||||
func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
|
func readOutsidePackets(f *Interface) udp.EncReader {
|
||||||
|
return func(
|
||||||
|
addr *udp.Addr,
|
||||||
|
out []byte,
|
||||||
|
packet []byte,
|
||||||
|
header *header.H,
|
||||||
|
fwPacket *firewall.Packet,
|
||||||
|
lhh udp.LightHouseHandlerFunc,
|
||||||
|
nb []byte,
|
||||||
|
q int,
|
||||||
|
localCache firewall.ConntrackCache,
|
||||||
|
) {
|
||||||
|
f.readOutsidePackets(addr, nil, out, packet, header, fwPacket, lhh, nb, q, localCache)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Interface) readOutsidePackets(addr *udp.Addr, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
err := h.Parse(packet)
|
err := h.Parse(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO: best if we return this and let caller log
|
// TODO: best if we return this and let caller log
|
||||||
@@ -34,6 +50,16 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
}
|
}
|
||||||
|
|
||||||
//l.Error("in packet ", header, packet[HeaderLen:])
|
//l.Error("in packet ", header, packet[HeaderLen:])
|
||||||
|
if addr != nil {
|
||||||
|
if ip4 := addr.IP.To4(); ip4 != nil {
|
||||||
|
if ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, iputil.VpnIp(binary.BigEndian.Uint32(ip4))) {
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("udpAddr", addr).Debug("Refusing to process double encrypted packet")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var hostinfo *HostInfo
|
var hostinfo *HostInfo
|
||||||
// verify if we've seen this index before, otherwise respond to the handshake initiation
|
// verify if we've seen this index before, otherwise respond to the handshake initiation
|
||||||
@@ -57,7 +83,9 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
|
|
||||||
switch h.Subtype {
|
switch h.Subtype {
|
||||||
case header.MessageNone:
|
case header.MessageNone:
|
||||||
f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache)
|
if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache) {
|
||||||
|
return
|
||||||
|
}
|
||||||
case header.MessageRelay:
|
case header.MessageRelay:
|
||||||
// The entire body is sent as AD, not encrypted.
|
// The entire body is sent as AD, not encrypted.
|
||||||
// The packet consists of a 16-byte parsed Nebula header, Associated Data-protected payload, and a trailing 16-byte AEAD signature value.
|
// The packet consists of a 16-byte parsed Nebula header, Associated Data-protected payload, and a trailing 16-byte AEAD signature value.
|
||||||
@@ -74,17 +102,15 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
signedPayload = signedPayload[header.Len:]
|
signedPayload = signedPayload[header.Len:]
|
||||||
// Pull the Roaming parts up here, and return in all call paths.
|
// Pull the Roaming parts up here, and return in all call paths.
|
||||||
f.handleHostRoaming(hostinfo, addr)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
f.connectionManager.In(hostinfo.vpnIp)
|
// Track usage of both the HostInfo and the Relay for the received & authenticated packet
|
||||||
|
f.connectionManager.In(hostinfo.localIndexId)
|
||||||
|
f.connectionManager.RelayUsed(h.RemoteIndex)
|
||||||
|
|
||||||
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
|
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
|
||||||
if !ok {
|
if !ok {
|
||||||
// The only way this happens is if hostmap has an index to the correct HostInfo, but the HostInfo is missing
|
// The only way this happens is if hostmap has an index to the correct HostInfo, but the HostInfo is missing
|
||||||
// its internal mapping. This shouldn't happen!
|
// its internal mapping. This should never happen.
|
||||||
hostinfo.logger(f.l).WithField("hostinfo", hostinfo.vpnIp).WithField("remoteIndex", h.RemoteIndex).Errorf("HostInfo missing remote index")
|
hostinfo.logger(f.l).WithFields(logrus.Fields{"vpnIp": hostinfo.vpnIp, "remoteIndex": h.RemoteIndex}).Error("HostInfo missing remote relay index")
|
||||||
// Delete my local index from the hostmap
|
|
||||||
f.hostMap.DeleteRelayIdx(h.RemoteIndex)
|
|
||||||
// When the peer doesn't recieve any return traffic, its connection_manager will eventually clean up
|
|
||||||
// the broken relay when it cleans up the associated HostInfo object.
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,15 +122,9 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
return
|
return
|
||||||
case ForwardingType:
|
case ForwardingType:
|
||||||
// Find the target HostInfo relay object
|
// Find the target HostInfo relay object
|
||||||
targetHI, err := f.hostMap.QueryVpnIp(relay.PeerIp)
|
targetHI, targetRelay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relay.PeerIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithField("peerIp", relay.PeerIp).WithError(err).Info("Failed to find target host info by ip")
|
hostinfo.logger(f.l).WithField("relayTo", relay.PeerIp).WithError(err).Info("Failed to find target host info by ip")
|
||||||
return
|
|
||||||
}
|
|
||||||
// find the target Relay info object
|
|
||||||
targetRelay, ok := targetHI.relayState.QueryRelayForByIp(hostinfo.vpnIp)
|
|
||||||
if !ok {
|
|
||||||
hostinfo.logger(f.l).WithField("peerIp", relay.PeerIp).Info("Failed to find relay in hostinfo")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -120,7 +140,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal")
|
hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
hostinfo.logger(f.l).WithField("targetRelayState", targetRelay.State).Info("Unexpected target relay state")
|
hostinfo.logger(f.l).WithFields(logrus.Fields{"relayTo": relay.PeerIp, "relayFrom": hostinfo.vpnIp, "targetRelayState": targetRelay.State}).Info("Unexpected target relay state")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -143,7 +163,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lhf(addr, hostinfo.vpnIp, d, f)
|
lhf(addr, hostinfo.vpnIp, d)
|
||||||
|
|
||||||
// Fallthrough to the bottom to record incoming traffic
|
// Fallthrough to the bottom to record incoming traffic
|
||||||
|
|
||||||
@@ -227,17 +247,16 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
|
|||||||
|
|
||||||
f.handleHostRoaming(hostinfo, addr)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
|
|
||||||
f.connectionManager.In(hostinfo.vpnIp)
|
f.connectionManager.In(hostinfo.localIndexId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
|
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
|
||||||
func (f *Interface) closeTunnel(hostInfo *HostInfo) {
|
func (f *Interface) closeTunnel(hostInfo *HostInfo) {
|
||||||
//TODO: this would be better as a single function in ConnectionManager that handled locks appropriately
|
final := f.hostMap.DeleteHostInfo(hostInfo)
|
||||||
f.connectionManager.ClearIP(hostInfo.vpnIp)
|
if final {
|
||||||
f.connectionManager.ClearPendingDeletion(hostInfo.vpnIp)
|
// We no longer have any tunnels with this vpn ip, clear learned lighthouse state to lower memory usage
|
||||||
f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
|
f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
|
||||||
|
}
|
||||||
f.hostMap.DeleteHostInfo(hostInfo)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
|
// sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
|
||||||
@@ -361,7 +380,7 @@ func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) {
|
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) bool {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
|
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
|
||||||
@@ -369,37 +388,39 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
|||||||
hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
|
hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
|
||||||
//TODO: maybe after build 64 is out? 06/14/2018 - NB
|
//TODO: maybe after build 64 is out? 06/14/2018 - NB
|
||||||
//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
|
//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
err = newPacket(out, true, fwPacket)
|
err = newPacket(out, true, fwPacket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithError(err).WithField("packet", out).
|
hostinfo.logger(f.l).WithError(err).WithField("packet", out).
|
||||||
Warnf("Error while validating inbound packet")
|
Warnf("Error while validating inbound packet")
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
|
if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
|
||||||
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
||||||
Debugln("dropping out of window packet")
|
Debugln("dropping out of window packet")
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.caPool, localCache)
|
dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.caPool, localCache)
|
||||||
if dropReason != nil {
|
if dropReason != nil {
|
||||||
|
f.rejectOutside(out, hostinfo.ConnectionState, hostinfo, nb, out, q)
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
||||||
WithField("reason", dropReason).
|
WithField("reason", dropReason).
|
||||||
Debugln("dropping inbound packet")
|
Debugln("dropping inbound packet")
|
||||||
}
|
}
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
f.connectionManager.In(hostinfo.vpnIp)
|
f.connectionManager.In(hostinfo.localIndexId)
|
||||||
_, err = f.readers[q].Write(out)
|
_, err = f.readers[q].Write(out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithError(err).Error("Failed to write to tun")
|
f.l.WithError(err).Error("Failed to write to tun")
|
||||||
}
|
}
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) maybeSendRecvError(endpoint *udp.Addr, index uint32) {
|
func (f *Interface) maybeSendRecvError(endpoint *udp.Addr, index uint32) {
|
||||||
|
|||||||
@@ -14,10 +14,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Route struct {
|
type Route struct {
|
||||||
MTU int
|
MTU int
|
||||||
Metric int
|
Metric int
|
||||||
Cidr *net.IPNet
|
Cidr *net.IPNet
|
||||||
Via *iputil.VpnIp
|
Via *iputil.VpnIp
|
||||||
|
Install bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*cidr.Tree4, error) {
|
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*cidr.Tree4, error) {
|
||||||
@@ -81,7 +82,8 @@ func parseRoutes(c *config.C, network *net.IPNet) ([]Route, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r := Route{
|
r := Route{
|
||||||
MTU: mtu,
|
Install: true,
|
||||||
|
MTU: mtu,
|
||||||
}
|
}
|
||||||
|
|
||||||
_, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute))
|
_, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute))
|
||||||
@@ -182,10 +184,20 @@ func parseUnsafeRoutes(c *config.C, network *net.IPNet) ([]Route, error) {
|
|||||||
|
|
||||||
viaVpnIp := iputil.Ip2VpnIp(nVia)
|
viaVpnIp := iputil.Ip2VpnIp(nVia)
|
||||||
|
|
||||||
|
install := true
|
||||||
|
rInstall, ok := m["install"]
|
||||||
|
if ok {
|
||||||
|
install, err = strconv.ParseBool(fmt.Sprintf("%v", rInstall))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("entry %v.install in tun.unsafe_routes is not a boolean: %v", i+1, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
r := Route{
|
r := Route{
|
||||||
Via: &viaVpnIp,
|
Via: &viaVpnIp,
|
||||||
MTU: mtu,
|
MTU: mtu,
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
|
Install: install,
|
||||||
}
|
}
|
||||||
|
|
||||||
_, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute))
|
_, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute))
|
||||||
|
|||||||
@@ -92,6 +92,8 @@ func Test_parseRoutes(t *testing.T) {
|
|||||||
|
|
||||||
tested := 0
|
tested := 0
|
||||||
for _, r := range routes {
|
for _, r := range routes {
|
||||||
|
assert.True(t, r.Install)
|
||||||
|
|
||||||
if r.MTU == 8000 {
|
if r.MTU == 8000 {
|
||||||
assert.Equal(t, "10.0.0.1/32", r.Cidr.String())
|
assert.Equal(t, "10.0.0.1/32", r.Cidr.String())
|
||||||
tested++
|
tested++
|
||||||
@@ -205,35 +207,45 @@ func Test_parseUnsafeRoutes(t *testing.T) {
|
|||||||
assert.Nil(t, routes)
|
assert.Nil(t, routes)
|
||||||
assert.EqualError(t, err, "entry 1.mtu in tun.unsafe_routes is below 500: 499")
|
assert.EqualError(t, err, "entry 1.mtu in tun.unsafe_routes is below 500: 499")
|
||||||
|
|
||||||
|
// bad install
|
||||||
|
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29", "install": "nope"}}}
|
||||||
|
routes, err = parseUnsafeRoutes(c, n)
|
||||||
|
assert.Nil(t, routes)
|
||||||
|
assert.EqualError(t, err, "entry 1.install in tun.unsafe_routes is not a boolean: strconv.ParseBool: parsing \"nope\": invalid syntax")
|
||||||
|
|
||||||
// happy case
|
// happy case
|
||||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{
|
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{
|
||||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29"},
|
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29", "install": "t"},
|
||||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "8000", "route": "1.0.0.1/32"},
|
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "8000", "route": "1.0.0.1/32", "install": 0},
|
||||||
|
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32", "install": 1},
|
||||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32"},
|
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32"},
|
||||||
}}
|
}}
|
||||||
routes, err = parseUnsafeRoutes(c, n)
|
routes, err = parseUnsafeRoutes(c, n)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Len(t, routes, 3)
|
assert.Len(t, routes, 4)
|
||||||
|
|
||||||
tested := 0
|
tested := 0
|
||||||
for _, r := range routes {
|
for _, r := range routes {
|
||||||
if r.MTU == 8000 {
|
if r.MTU == 8000 {
|
||||||
assert.Equal(t, "1.0.0.1/32", r.Cidr.String())
|
assert.Equal(t, "1.0.0.1/32", r.Cidr.String())
|
||||||
|
assert.False(t, r.Install)
|
||||||
tested++
|
tested++
|
||||||
} else if r.MTU == 9000 {
|
} else if r.MTU == 9000 {
|
||||||
assert.Equal(t, 9000, r.MTU)
|
assert.Equal(t, 9000, r.MTU)
|
||||||
assert.Equal(t, "1.0.0.0/29", r.Cidr.String())
|
assert.Equal(t, "1.0.0.0/29", r.Cidr.String())
|
||||||
|
assert.True(t, r.Install)
|
||||||
tested++
|
tested++
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, 1500, r.MTU)
|
assert.Equal(t, 1500, r.MTU)
|
||||||
assert.Equal(t, 1234, r.Metric)
|
assert.Equal(t, 1234, r.Metric)
|
||||||
assert.Equal(t, "1.0.0.2/32", r.Cidr.String())
|
assert.Equal(t, "1.0.0.2/32", r.Cidr.String())
|
||||||
|
assert.True(t, r.Install)
|
||||||
tested++
|
tested++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if tested != 3 {
|
if tested != 4 {
|
||||||
t.Fatal("Did not see both unsafe_routes")
|
t.Fatal("Did not see all unsafe_routes")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -35,6 +35,7 @@ func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, fd *
|
|||||||
c.GetInt("tun.mtu", DefaultMTU),
|
c.GetInt("tun.mtu", DefaultMTU),
|
||||||
routes,
|
routes,
|
||||||
c.GetInt("tun.tx_queue", 500),
|
c.GetInt("tun.tx_queue", 500),
|
||||||
|
c.GetBool("tun.use_system_route_table", false),
|
||||||
)
|
)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
@@ -46,6 +47,7 @@ func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, fd *
|
|||||||
routes,
|
routes,
|
||||||
c.GetInt("tun.tx_queue", 500),
|
c.GetInt("tun.tx_queue", 500),
|
||||||
routines > 1,
|
routines > 1,
|
||||||
|
c.GetBool("tun.use_system_route_table", false),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,39 +8,49 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
"github.com/slackhq/nebula/iputil"
|
"github.com/slackhq/nebula/iputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type tun struct {
|
type tun struct {
|
||||||
io.ReadWriteCloser
|
io.ReadWriteCloser
|
||||||
fd int
|
fd int
|
||||||
cidr *net.IPNet
|
cidr *net.IPNet
|
||||||
l *logrus.Logger
|
routeTree *cidr.Tree4
|
||||||
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes []Route, _ int) (*tun, error) {
|
func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes []Route, _ int, _ bool) (*tun, error) {
|
||||||
if len(routes) > 0 {
|
routeTree, err := makeRouteTree(l, routes, false)
|
||||||
return nil, fmt.Errorf("routes are not supported in %s", runtime.GOOS)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// XXX Android returns an fd in non-blocking mode which is necessary for shutdown to work properly.
|
||||||
|
// Be sure not to call file.Fd() as it will set the fd to blocking mode.
|
||||||
file := os.NewFile(uintptr(deviceFd), "/dev/net/tun")
|
file := os.NewFile(uintptr(deviceFd), "/dev/net/tun")
|
||||||
|
|
||||||
return &tun{
|
return &tun{
|
||||||
ReadWriteCloser: file,
|
ReadWriteCloser: file,
|
||||||
fd: int(file.Fd()),
|
fd: deviceFd,
|
||||||
cidr: cidr,
|
cidr: cidr,
|
||||||
l: l,
|
l: l,
|
||||||
|
routeTree: routeTree,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) {
|
func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _ bool, _ bool) (*tun, error) {
|
||||||
return nil, fmt.Errorf("newTun not supported in Android")
|
return nil, fmt.Errorf("newTun not supported in Android")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *tun) RouteFor(iputil.VpnIp) iputil.VpnIp {
|
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
|
||||||
|
r := t.routeTree.MostSpecificContains(ip)
|
||||||
|
if r != nil {
|
||||||
|
return r.(iputil.VpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -77,7 +77,7 @@ type ifreqMTU struct {
|
|||||||
pad [8]byte
|
pad [8]byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTun(l *logrus.Logger, name string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool) (*tun, error) {
|
func newTun(l *logrus.Logger, name string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool, _ bool) (*tun, error) {
|
||||||
routeTree, err := makeRouteTree(l, routes, false)
|
routeTree, err := makeRouteTree(l, routes, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -170,7 +170,7 @@ func (t *tun) deviceBytes() (o [16]byte) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int) (*tun, error) {
|
func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) {
|
||||||
return nil, fmt.Errorf("newTunFromFd not supported in Darwin")
|
return nil, fmt.Errorf("newTunFromFd not supported in Darwin")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -287,7 +287,7 @@ func (t *tun) Activate() error {
|
|||||||
|
|
||||||
// Unsafe path routes
|
// Unsafe path routes
|
||||||
for _, r := range t.Routes {
|
for _, r := range t.Routes {
|
||||||
if r.Via == nil {
|
if r.Via == nil || !r.Install {
|
||||||
// We don't allow route MTUs so only install routes with a via
|
// We don't allow route MTUs so only install routes with a via
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package overlay
|
package overlay
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
@@ -75,38 +74,15 @@ func (t *disabledTun) Read(b []byte) (int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *disabledTun) handleICMPEchoRequest(b []byte) bool {
|
func (t *disabledTun) handleICMPEchoRequest(b []byte) bool {
|
||||||
// Return early if this is not a simple ICMP Echo Request
|
out := make([]byte, len(b))
|
||||||
//TODO: make constants out of these
|
out = iputil.CreateICMPEchoResponse(b, out)
|
||||||
if !(len(b) >= 28 && len(b) <= 9001 && b[0] == 0x45 && b[9] == 0x01 && b[20] == 0x08) {
|
if out == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// We don't support fragmented packets
|
|
||||||
if b[7] != 0 || (b[6]&0x2F != 0) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, len(b))
|
|
||||||
copy(buf, b)
|
|
||||||
|
|
||||||
// Swap dest / src IPs and recalculate checksum
|
|
||||||
ipv4 := buf[0:20]
|
|
||||||
copy(ipv4[12:16], b[16:20])
|
|
||||||
copy(ipv4[16:20], b[12:16])
|
|
||||||
ipv4[10] = 0
|
|
||||||
ipv4[11] = 0
|
|
||||||
binary.BigEndian.PutUint16(ipv4[10:], ipChecksum(ipv4))
|
|
||||||
|
|
||||||
// Change type to ICMP Echo Reply and recalculate checksum
|
|
||||||
icmp := buf[20:]
|
|
||||||
icmp[0] = 0
|
|
||||||
icmp[2] = 0
|
|
||||||
icmp[3] = 0
|
|
||||||
binary.BigEndian.PutUint16(icmp[2:], ipChecksum(icmp))
|
|
||||||
|
|
||||||
// attempt to write it, but don't block
|
// attempt to write it, but don't block
|
||||||
select {
|
select {
|
||||||
case t.read <- buf:
|
case t.read <- out:
|
||||||
default:
|
default:
|
||||||
t.l.Debugf("tun_disabled: dropped ICMP Echo Reply response")
|
t.l.Debugf("tun_disabled: dropped ICMP Echo Reply response")
|
||||||
}
|
}
|
||||||
@@ -154,22 +130,3 @@ func (p prettyPacket) String() string {
|
|||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func ipChecksum(b []byte) uint16 {
|
|
||||||
var c uint32
|
|
||||||
sz := len(b) - 1
|
|
||||||
|
|
||||||
for i := 0; i < sz; i += 2 {
|
|
||||||
c += uint32(b[i]) << 8
|
|
||||||
c += uint32(b[i+1])
|
|
||||||
}
|
|
||||||
if sz%2 == 0 {
|
|
||||||
c += uint32(b[sz]) << 8
|
|
||||||
}
|
|
||||||
|
|
||||||
for (c >> 16) > 0 {
|
|
||||||
c = (c & 0xffff) + (c >> 16)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ^uint16(c)
|
|
||||||
}
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user