mirror of
https://github.com/slackhq/nebula.git
synced 2025-11-22 08:24:25 +01:00
Compare commits
119 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6d5299715e | ||
|
|
ba8646fa83 | ||
|
|
c1ed78ffc7 | ||
|
|
cf3b7ec2fa | ||
|
|
a22c134bf5 | ||
|
|
94aaab042f | ||
|
|
b358bbab80 | ||
|
|
bcabcfdaca | ||
|
|
1f75fb3c73 | ||
|
|
6ae8ba26f7 | ||
|
|
32cd9a93f1 | ||
|
|
97afe2ec48 | ||
|
|
32e2619323 | ||
|
|
e8b08e49e6 | ||
|
|
ea2c186a77 | ||
|
|
ae5505bc74 | ||
|
|
afda79feac | ||
|
|
0e7bc290f8 | ||
|
|
3a8f533b24 | ||
|
|
34d002d695 | ||
|
|
9f34c5e2ba | ||
|
|
3f5caf67ff | ||
|
|
e01213cd21 | ||
|
|
af3674ac7b | ||
|
|
c726d20578 | ||
|
|
d13f4b5948 | ||
|
|
2e1d6743be | ||
|
|
d004fae4f9 | ||
|
|
95f4c8a01b | ||
|
|
9ff73cb02f | ||
|
|
98c391396c | ||
|
|
1bc6f5fe6c | ||
|
|
44cb697552 | ||
|
|
db23fdf9bc | ||
|
|
df7c7eec4a | ||
|
|
6f37280e8e | ||
|
|
a0735dd7d5 | ||
|
|
1deb5d98e8 | ||
|
|
a1ee521d79 | ||
|
|
7859140711 | ||
|
|
17106f83a0 | ||
|
|
ab08be1e3e | ||
|
|
710df6a876 | ||
|
|
20bef975cd | ||
|
|
480036fbc8 | ||
|
|
1499be3e40 | ||
|
|
64d8e5aa96 | ||
|
|
75f7bda0a4 | ||
|
|
e7e55618ff | ||
|
|
0c2e5973e1 | ||
|
|
830d6d4639 | ||
|
|
883e09a392 | ||
|
|
4603b5b2dd | ||
|
|
a71541fb0b | ||
|
|
3ea7e1b75f | ||
|
|
7a9f9dbded | ||
|
|
7073d204a8 | ||
|
|
9e94442ce7 | ||
|
|
13471f5792 | ||
|
|
ea07a89cc8 | ||
|
|
3aaaea6309 | ||
|
|
5506da3de9 | ||
|
|
6c55d67f18 | ||
|
|
64d8035d09 | ||
|
|
73a5ed90b2 | ||
|
|
d604270966 | ||
|
|
29c5f31f90 | ||
|
|
b6234abfb3 | ||
|
|
2a4beb41b9 | ||
|
|
d232ccbfab | ||
|
|
ecfb40f29c | ||
|
|
1bae5b2550 | ||
|
|
73081d99bc | ||
|
|
e7e6a23cde | ||
|
|
a0583ebdca | ||
|
|
27d9a67dda | ||
|
|
2bce222550 | ||
|
|
3dd1108099 | ||
|
|
d4b81f9b8d | ||
|
|
454bc8a6bb | ||
|
|
ce9ad37431 | ||
|
|
ee7c27093c | ||
|
|
2e7ca027a4 | ||
|
|
672ce1f0a8 | ||
|
|
384b1166ea | ||
|
|
0389596f66 | ||
|
|
43a3988afc | ||
|
|
5c23676a0f | ||
|
|
f6d0b4b893 | ||
|
|
0d6b55e495 | ||
|
|
c71c84882e | ||
|
|
0010db46e4 | ||
|
|
68e3e84fdc | ||
|
|
6238f1550b | ||
|
|
50b04413c7 | ||
|
|
ef498a31da | ||
|
|
2e5a477a50 | ||
|
|
32fe9bfe75 | ||
|
|
9b8b3c478b | ||
|
|
7b3f23d9a1 | ||
|
|
25964b54f6 | ||
|
|
ac557f381b | ||
|
|
a54f3fc681 | ||
|
|
5545cff6ef | ||
|
|
f3a6d8d990 | ||
|
|
9b06748506 | ||
|
|
4756c9613d | ||
|
|
4645e6034b | ||
|
|
aba42f9fa6 | ||
|
|
41578ca971 | ||
|
|
1ea8847085 | ||
|
|
55858c64cc | ||
|
|
e94c6b0125 | ||
|
|
b37a91cfbc | ||
|
|
3212b769d4 | ||
|
|
ecf0e5a9f6 | ||
|
|
ff13aba8fc | ||
|
|
cc03ff9e9a | ||
|
|
363c836422 |
24
.github/workflows/gofmt.yml
vendored
24
.github/workflows/gofmt.yml
vendored
@@ -14,19 +14,31 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v1
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: ~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-gofmt1.17-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-gofmt1.17-
|
||||||
|
|
||||||
|
- name: Install goimports
|
||||||
|
run: |
|
||||||
|
go get golang.org/x/tools/cmd/goimports
|
||||||
|
go build golang.org/x/tools/cmd/goimports
|
||||||
|
|
||||||
- name: gofmt
|
- name: gofmt
|
||||||
run: |
|
run: |
|
||||||
if [ "$(find . -iname '*.go' | xargs gofmt -l)" ]
|
if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -l)" ]
|
||||||
then
|
then
|
||||||
find . -iname '*.go' | xargs gofmt -d
|
find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -d
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|||||||
61
.github/workflows/release.yml
vendored
61
.github/workflows/release.yml
vendored
@@ -10,17 +10,17 @@ jobs:
|
|||||||
name: Build Linux All
|
name: Build Linux All
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
|
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux
|
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
|
||||||
mkdir release
|
mkdir release
|
||||||
mv build/*.tar.gz release
|
mv build/*.tar.gz release
|
||||||
|
|
||||||
@@ -34,10 +34,10 @@ jobs:
|
|||||||
name: Build Windows amd64
|
name: Build Windows amd64
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
|
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
@@ -58,10 +58,10 @@ jobs:
|
|||||||
name: Build Darwin amd64
|
name: Build Darwin amd64
|
||||||
runs-on: macOS-latest
|
runs-on: macOS-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
|
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
@@ -69,6 +69,7 @@ jobs:
|
|||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/nebula-darwin-amd64.tar.gz
|
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/nebula-darwin-amd64.tar.gz
|
||||||
|
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/nebula-darwin-arm64.tar.gz
|
||||||
mkdir release
|
mkdir release
|
||||||
mv build/*.tar.gz release
|
mv build/*.tar.gz release
|
||||||
|
|
||||||
@@ -159,6 +160,16 @@ jobs:
|
|||||||
asset_name: nebula-darwin-amd64.tar.gz
|
asset_name: nebula-darwin-amd64.tar.gz
|
||||||
asset_content_type: application/gzip
|
asset_content_type: application/gzip
|
||||||
|
|
||||||
|
- name: Upload darwin-arm64
|
||||||
|
uses: actions/upload-release-asset@v1.0.1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||||
|
asset_path: ./darwin-latest/nebula-darwin-arm64.tar.gz
|
||||||
|
asset_name: nebula-darwin-arm64.tar.gz
|
||||||
|
asset_content_type: application/gzip
|
||||||
|
|
||||||
- name: Upload windows-amd64
|
- name: Upload windows-amd64
|
||||||
uses: actions/upload-release-asset@v1.0.1
|
uses: actions/upload-release-asset@v1.0.1
|
||||||
env:
|
env:
|
||||||
@@ -278,3 +289,33 @@ jobs:
|
|||||||
asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
|
asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
|
||||||
asset_name: nebula-linux-mips64le.tar.gz
|
asset_name: nebula-linux-mips64le.tar.gz
|
||||||
asset_content_type: application/gzip
|
asset_content_type: application/gzip
|
||||||
|
|
||||||
|
- name: Upload linux-mips-softfloat
|
||||||
|
uses: actions/upload-release-asset@v1.0.1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||||
|
asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
|
||||||
|
asset_name: nebula-linux-mips-softfloat.tar.gz
|
||||||
|
asset_content_type: application/gzip
|
||||||
|
|
||||||
|
- name: Upload linux-riscv64
|
||||||
|
uses: actions/upload-release-asset@v1.0.1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||||
|
asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
|
||||||
|
asset_name: nebula-linux-riscv64.tar.gz
|
||||||
|
asset_content_type: application/gzip
|
||||||
|
|
||||||
|
- name: Upload freebsd-amd64
|
||||||
|
uses: actions/upload-release-asset@v1.0.1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||||
|
asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
|
||||||
|
asset_name: nebula-freebsd-amd64.tar.gz
|
||||||
|
asset_content_type: application/gzip
|
||||||
|
|||||||
18
.github/workflows/smoke.yml
vendored
18
.github/workflows/smoke.yml
vendored
@@ -14,28 +14,28 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
smoke:
|
smoke:
|
||||||
name: Run 3 node smoke test
|
name: Run multi node smoke test
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v1
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- uses: actions/cache@v1
|
- uses: actions/cache@v2
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
path: ~/go/pkg/mod
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
key: ${{ runner.os }}-go1.17-${{ hashFiles('**/go.sum') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
${{ runner.os }}-go-
|
${{ runner.os }}-go1.17-
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
run: make
|
run: make bin-docker
|
||||||
|
|
||||||
- name: setup docker image
|
- name: setup docker image
|
||||||
working-directory: ./.github/workflows/smoke
|
working-directory: ./.github/workflows/smoke
|
||||||
|
|||||||
6
.github/workflows/smoke/Dockerfile
vendored
6
.github/workflows/smoke/Dockerfile
vendored
@@ -1,5 +1,7 @@
|
|||||||
FROM debian:buster
|
FROM debian:buster
|
||||||
|
|
||||||
ADD ./build /
|
ADD ./build /nebula
|
||||||
|
|
||||||
ENTRYPOINT ["/nebula"]
|
WORKDIR /nebula
|
||||||
|
|
||||||
|
ENTRYPOINT ["/nebula/nebula"]
|
||||||
|
|||||||
35
.github/workflows/smoke/build.sh
vendored
35
.github/workflows/smoke/build.sh
vendored
@@ -8,17 +8,32 @@ mkdir ./build
|
|||||||
(
|
(
|
||||||
cd build
|
cd build
|
||||||
|
|
||||||
cp ../../../../nebula .
|
cp ../../../../build/linux-amd64/nebula .
|
||||||
cp ../../../../nebula-cert .
|
cp ../../../../build/linux-amd64/nebula-cert .
|
||||||
|
|
||||||
HOST="lighthouse1" AM_LIGHTHOUSE=true ../genconfig.sh >lighthouse1.yml
|
HOST="lighthouse1" \
|
||||||
HOST="host2" LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" ../genconfig.sh >host2.yml
|
AM_LIGHTHOUSE=true \
|
||||||
HOST="host3" LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" ../genconfig.sh >host3.yml
|
../genconfig.sh >lighthouse1.yml
|
||||||
|
|
||||||
./nebula-cert ca -name "Smoke Test"
|
HOST="host2" \
|
||||||
./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"
|
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
|
||||||
./nebula-cert sign -name "host2" -ip "192.168.100.2/24"
|
../genconfig.sh >host2.yml
|
||||||
./nebula-cert sign -name "host3" -ip "192.168.100.3/24"
|
|
||||||
|
HOST="host3" \
|
||||||
|
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
|
||||||
|
INBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
|
||||||
|
../genconfig.sh >host3.yml
|
||||||
|
|
||||||
|
HOST="host4" \
|
||||||
|
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
|
||||||
|
OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
|
||||||
|
../genconfig.sh >host4.yml
|
||||||
|
|
||||||
|
../../../../nebula-cert ca -name "Smoke Test"
|
||||||
|
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
|
||||||
|
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
|
||||||
|
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
|
||||||
|
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
|
||||||
)
|
)
|
||||||
|
|
||||||
docker build -t nebula:smoke .
|
sudo docker build -t nebula:smoke .
|
||||||
|
|||||||
18
.github/workflows/smoke/genconfig.sh
vendored
18
.github/workflows/smoke/genconfig.sh
vendored
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
|
FIREWALL_ALL='[{"port": "any", "proto": "any", "host": "any"}]'
|
||||||
|
|
||||||
if [ "$STATIC_HOSTS" ] || [ "$LIGHTHOUSES" ]
|
if [ "$STATIC_HOSTS" ] || [ "$LIGHTHOUSES" ]
|
||||||
then
|
then
|
||||||
@@ -32,9 +33,9 @@ lighthouse_hosts() {
|
|||||||
|
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
pki:
|
pki:
|
||||||
ca: /ca.crt
|
ca: ca.crt
|
||||||
cert: /${HOST}.crt
|
cert: ${HOST}.crt
|
||||||
key: /${HOST}.key
|
key: ${HOST}.key
|
||||||
|
|
||||||
lighthouse:
|
lighthouse:
|
||||||
am_lighthouse: ${AM_LIGHTHOUSE:-false}
|
am_lighthouse: ${AM_LIGHTHOUSE:-false}
|
||||||
@@ -48,13 +49,6 @@ tun:
|
|||||||
dev: ${TUN_DEV:-nebula1}
|
dev: ${TUN_DEV:-nebula1}
|
||||||
|
|
||||||
firewall:
|
firewall:
|
||||||
outbound:
|
outbound: ${OUTBOUND:-$FIREWALL_ALL}
|
||||||
- port: any
|
inbound: ${INBOUND:-$FIREWALL_ALL}
|
||||||
proto: any
|
|
||||||
host: any
|
|
||||||
|
|
||||||
inbound:
|
|
||||||
- port: any
|
|
||||||
proto: any
|
|
||||||
host: any
|
|
||||||
EOF
|
EOF
|
||||||
|
|||||||
71
.github/workflows/smoke/smoke.sh
vendored
71
.github/workflows/smoke/smoke.sh
vendored
@@ -1,16 +1,33 @@
|
|||||||
#!/bin/sh
|
#!/bin/bash
|
||||||
|
|
||||||
set -e -x
|
set -e -x
|
||||||
|
|
||||||
docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
|
set -o pipefail
|
||||||
docker run --name host2 --rm nebula:smoke -config host2.yml -test
|
|
||||||
docker run --name host3 --rm nebula:smoke -config host3.yml -test
|
|
||||||
|
|
||||||
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml &
|
mkdir -p logs
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
set +e
|
||||||
|
if [ "$(jobs -r)" ]
|
||||||
|
then
|
||||||
|
sudo docker kill lighthouse1 host2 host3 host4
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
sudo docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
|
||||||
|
sudo docker run --name host2 --rm nebula:smoke -config host2.yml -test
|
||||||
|
sudo docker run --name host3 --rm nebula:smoke -config host3.yml -test
|
||||||
|
sudo docker run --name host4 --rm nebula:smoke -config host4.yml -test
|
||||||
|
|
||||||
|
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 &
|
||||||
sleep 1
|
sleep 1
|
||||||
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml &
|
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml 2>&1 | tee logs/host2 &
|
||||||
sleep 1
|
sleep 1
|
||||||
docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml &
|
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml 2>&1 | tee logs/host3 &
|
||||||
|
sleep 1
|
||||||
|
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host4.yml 2>&1 | tee logs/host4 &
|
||||||
sleep 1
|
sleep 1
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
@@ -18,21 +35,49 @@ echo
|
|||||||
echo " *** Testing ping from lighthouse1"
|
echo " *** Testing ping from lighthouse1"
|
||||||
echo
|
echo
|
||||||
set -x
|
set -x
|
||||||
docker exec lighthouse1 ping -c1 192.168.100.2
|
sudo docker exec lighthouse1 ping -c1 192.168.100.2
|
||||||
docker exec lighthouse1 ping -c1 192.168.100.3
|
sudo docker exec lighthouse1 ping -c1 192.168.100.3
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
echo
|
echo
|
||||||
echo " *** Testing ping from host2"
|
echo " *** Testing ping from host2"
|
||||||
echo
|
echo
|
||||||
set -x
|
set -x
|
||||||
docker exec host2 ping -c1 192.168.100.1
|
sudo docker exec host2 ping -c1 192.168.100.1
|
||||||
docker exec host2 ping -c1 192.168.100.3
|
# Should fail because not allowed by host3 inbound firewall
|
||||||
|
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
echo
|
echo
|
||||||
echo " *** Testing ping from host3"
|
echo " *** Testing ping from host3"
|
||||||
echo
|
echo
|
||||||
set -x
|
set -x
|
||||||
docker exec host3 ping -c1 192.168.100.1
|
sudo docker exec host3 ping -c1 192.168.100.1
|
||||||
docker exec host3 ping -c1 192.168.100.2
|
sudo docker exec host3 ping -c1 192.168.100.2
|
||||||
|
|
||||||
|
set +x
|
||||||
|
echo
|
||||||
|
echo " *** Testing ping from host4"
|
||||||
|
echo
|
||||||
|
set -x
|
||||||
|
sudo docker exec host4 ping -c1 192.168.100.1
|
||||||
|
# Should fail because not allowed by host4 outbound firewall
|
||||||
|
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
||||||
|
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
|
||||||
|
|
||||||
|
set +x
|
||||||
|
echo
|
||||||
|
echo " *** Testing conntrack"
|
||||||
|
echo
|
||||||
|
set -x
|
||||||
|
# host2 can ping host3 now that host3 pinged it first
|
||||||
|
sudo docker exec host2 ping -c1 192.168.100.3
|
||||||
|
# host4 can ping host2 once conntrack established
|
||||||
|
sudo docker exec host2 ping -c1 192.168.100.4
|
||||||
|
sudo docker exec host4 ping -c1 192.168.100.2
|
||||||
|
|
||||||
|
sudo docker exec host4 sh -c 'kill 1'
|
||||||
|
sudo docker exec host3 sh -c 'kill 1'
|
||||||
|
sudo docker exec host2 sh -c 'kill 1'
|
||||||
|
sudo docker exec lighthouse1 sh -c 'kill 1'
|
||||||
|
sleep 1
|
||||||
|
|||||||
34
.github/workflows/test.yml
vendored
34
.github/workflows/test.yml
vendored
@@ -18,21 +18,21 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v1
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- uses: actions/cache@v1
|
- uses: actions/cache@v2
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
path: ~/go/pkg/mod
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
key: ${{ runner.os }}-go1.17-${{ hashFiles('**/go.sum') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
${{ runner.os }}-go-
|
${{ runner.os }}-go1.17-
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: make all
|
run: make all
|
||||||
@@ -40,6 +40,9 @@ jobs:
|
|||||||
- name: Test
|
- name: Test
|
||||||
run: make test
|
run: make test
|
||||||
|
|
||||||
|
- name: End 2 end
|
||||||
|
run: make e2evv
|
||||||
|
|
||||||
test:
|
test:
|
||||||
name: Build and test on ${{ matrix.os }}
|
name: Build and test on ${{ matrix.os }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
@@ -48,21 +51,21 @@ jobs:
|
|||||||
os: [windows-latest, macOS-latest]
|
os: [windows-latest, macOS-latest]
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Set up Go 1.14
|
- name: Set up Go 1.17
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.14
|
go-version: 1.17
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v1
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- uses: actions/cache@v1
|
- uses: actions/cache@v2
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
path: ~/go/pkg/mod
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
key: ${{ runner.os }}-go1.17-${{ hashFiles('**/go.sum') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
${{ runner.os }}-go-
|
${{ runner.os }}-go1.17-
|
||||||
|
|
||||||
- name: Build nebula
|
- name: Build nebula
|
||||||
run: go build ./cmd/nebula
|
run: go build ./cmd/nebula
|
||||||
@@ -72,3 +75,6 @@ jobs:
|
|||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: go test -v ./...
|
run: go test -v ./...
|
||||||
|
|
||||||
|
- name: End 2 end
|
||||||
|
run: make e2evv
|
||||||
|
|||||||
173
CHANGELOG.md
173
CHANGELOG.md
@@ -7,6 +7,175 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- SSH `print-cert` has a new `-raw` flag to get the PEM representation of a certificate. (#483)
|
||||||
|
|
||||||
|
- New build architecture: Linux `riscv64`. (#542)
|
||||||
|
|
||||||
|
- New experimental config option `remote_allow_ranges`. (#540)
|
||||||
|
|
||||||
|
- New config option `pki.disconnect_invalid` that will tear down tunnels when they become invalid (through expiry or
|
||||||
|
removal of root trust). Default is `false`. Note, this will not currently recognize if a remote has changed
|
||||||
|
certificates since the last handshake. (#370)
|
||||||
|
|
||||||
|
- New config option `unsafe_routes.<route>.metric` will set a metric for a specific unsafe route. It's useful if you have
|
||||||
|
more than one identical route and want to prefer one against the other.
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Build against go 1.17. (#553)
|
||||||
|
|
||||||
|
### Deprecated
|
||||||
|
|
||||||
|
- The `preferred_ranges` option has been supported as a replacement for
|
||||||
|
`local_range` since v1.0.0. It has now been documented and `local_range`
|
||||||
|
has been officially deprecated. (#541)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Valid recv_error packets were incorrectly marked as "spoofing" and ignored. (#482)
|
||||||
|
|
||||||
|
- SSH server handles single `exec` requests correctly. (#483)
|
||||||
|
|
||||||
|
- Signing a certificate with `nebula-cert sign` now verifies that the supplied
|
||||||
|
ca-key matches the ca-crt. (#503)
|
||||||
|
|
||||||
|
- If `preferred_ranges` (or the deprecated `local_range`) is configured, we
|
||||||
|
will immediately switch to a preferred remote address after the reception of
|
||||||
|
a handshake packet (instead of waiting until 1,000 packets have been sent).
|
||||||
|
(#532)
|
||||||
|
|
||||||
|
- A race condition when `punchy.respond` is enabled and ensures the correct
|
||||||
|
vpn ip is sent a punch back response in highly queried node. (#566)
|
||||||
|
|
||||||
|
## [1.4.0] - 2021-05-11
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- Ability to output qr code images in `print`, `ca`, and `sign` modes for `nebula-cert`.
|
||||||
|
This is useful when configuring mobile clients. (#297)
|
||||||
|
|
||||||
|
- Experimental: Nebula can now do work on more than 2 cpu cores in send and receive paths via
|
||||||
|
the new `routines` config option. (#382, #391, #395)
|
||||||
|
|
||||||
|
- ICMP ping requests can be responded to when the `tun.disabled` is `true`.
|
||||||
|
This is useful so that you can "ping" a lighthouse running in this mode. (#342)
|
||||||
|
|
||||||
|
- Run smoke tests via `make smoke-docker`. (#287)
|
||||||
|
|
||||||
|
- More reported stats, udp memory use on linux, build version (when using Prometheus), firewall,
|
||||||
|
handshake, and cached packet stats. (#390, #405, #450, #453)
|
||||||
|
|
||||||
|
- IPv6 support for the underlay network. (#369)
|
||||||
|
|
||||||
|
- End to end testing, run with `make e2e`. (#425, #427, #428)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Darwin will now log stdout/stderr to a file when using `-service` mode. (#303)
|
||||||
|
|
||||||
|
- Example systemd unit file now better arranged startup order when using `sshd`
|
||||||
|
and other fixes. (#317, #412, #438)
|
||||||
|
|
||||||
|
- Reduced memory utilization/garbage collection. (#320, #323, #340)
|
||||||
|
|
||||||
|
- Reduced CPU utilization. (#329)
|
||||||
|
|
||||||
|
- Build against go 1.16. (#381)
|
||||||
|
|
||||||
|
- Refactored handshakes to improve performance and correctness. (#401, #402, #404, #416, #451)
|
||||||
|
|
||||||
|
- Improved roaming support for mobile clients. (#394, #457)
|
||||||
|
|
||||||
|
- Lighthouse performance and correctness improvements. (#406, #418, #429, #433, #437, #442, #449)
|
||||||
|
|
||||||
|
- Better ordered startup to enable `sshd`, `stats`, and `dns` subsystems to listen on
|
||||||
|
the nebula interface. (#375)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- No longer report handshake packets as `lost` in stats. (#331)
|
||||||
|
|
||||||
|
- Error handling in the `cert` package. (#339, #373)
|
||||||
|
|
||||||
|
- Orphaned pending hostmap entries are cleaned up. (#344)
|
||||||
|
|
||||||
|
- Most known data races are now resolved. (#396, #400, #424)
|
||||||
|
|
||||||
|
- Refuse to run a lighthouse on an ephemeral port. (#399)
|
||||||
|
|
||||||
|
- Removed the global references. (#423, #426, #446)
|
||||||
|
|
||||||
|
- Reloading via ssh command avoids a panic. (#447)
|
||||||
|
|
||||||
|
- Shutdown is now performed in a cleaner way. (#448)
|
||||||
|
|
||||||
|
- Logs will now find their way to Windows event viewer when running under `-service` mode
|
||||||
|
in Windows. (#443)
|
||||||
|
|
||||||
|
## [1.3.0] - 2020-09-22
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- You can emit statistics about non-message packets by setting the option
|
||||||
|
`stats.message_metrics`. You can similarly emit detailed statistics about
|
||||||
|
lighthouse packets by setting the option `stats.lighthouse_metrics`. See
|
||||||
|
the example config for more details. (#230)
|
||||||
|
|
||||||
|
- We now support freebsd/amd64. This is experimental, please give us feedback.
|
||||||
|
(#103)
|
||||||
|
|
||||||
|
- We now release a binary for `linux/mips-softfloat` which has also been
|
||||||
|
stripped to reduce filesize and hopefully have a better chance on running on
|
||||||
|
small mips devices. (#231)
|
||||||
|
|
||||||
|
- You can set `tun.disabled` to true to run a standalone lighthouse without a
|
||||||
|
tun device (and thus, without root). (#269)
|
||||||
|
|
||||||
|
- You can set `logging.disable_timestamp` to remove timestamps from log lines,
|
||||||
|
which is useful when output is redirected to a logging system that already
|
||||||
|
adds timestamps. (#288)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Handshakes should now trigger faster, as we try to be proactive with sending
|
||||||
|
them instead of waiting for the next timer tick in most cases. (#246, #265)
|
||||||
|
|
||||||
|
- Previously, we would drop the conntrack table whenever firewall rules were
|
||||||
|
changed during a SIGHUP. Now, we will maintain the table and just validate
|
||||||
|
that an entry still matches with the new rule set. (#233)
|
||||||
|
|
||||||
|
- Debug logs for firewall drops now include the reason. (#220, #239)
|
||||||
|
|
||||||
|
- Logs for handshakes now include the fingerprint of the remote host. (#262)
|
||||||
|
|
||||||
|
- Config item `pki.blacklist` is now `pki.blocklist`. (#272)
|
||||||
|
|
||||||
|
- Better support for older Linux kernels. We now only set `SO_REUSEPORT` if
|
||||||
|
`tun.routines` is greater than 1 (default is 1). We also only use the
|
||||||
|
`recvmmsg` syscall if `listen.batch` is greater than 1 (default is 64).
|
||||||
|
(#275)
|
||||||
|
|
||||||
|
- It is possible to run Nebula as a library inside of another process now.
|
||||||
|
Note that this is still experimental and the internal APIs around this might
|
||||||
|
change in minor version releases. (#279)
|
||||||
|
|
||||||
|
### Deprecated
|
||||||
|
|
||||||
|
- `pki.blacklist` is deprecated in favor of `pki.blocklist` with the same
|
||||||
|
functionality. Existing configs will continue to load for this release to
|
||||||
|
allow for migrations. (#272)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- `advmss` is now set correctly for each route table entry when `tun.routes`
|
||||||
|
is configured to have some routes with higher MTU. (#245)
|
||||||
|
|
||||||
|
- Packets that arrive on the tun device with an unroutable destination IP are
|
||||||
|
now dropped correctly, instead of wasting time making queries to the
|
||||||
|
lighthouses for IP `0.0.0.0` (#267)
|
||||||
|
|
||||||
## [1.2.0] - 2020-04-08
|
## [1.2.0] - 2020-04-08
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
@@ -118,7 +287,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
|
|
||||||
- Initial public release.
|
- Initial public release.
|
||||||
|
|
||||||
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.2.0...HEAD
|
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.4.0...HEAD
|
||||||
|
[1.4.0]: https://github.com/slackhq/nebula/releases/tag/v1.4.0
|
||||||
|
[1.3.0]: https://github.com/slackhq/nebula/releases/tag/v1.3.0
|
||||||
[1.2.0]: https://github.com/slackhq/nebula/releases/tag/v1.2.0
|
[1.2.0]: https://github.com/slackhq/nebula/releases/tag/v1.2.0
|
||||||
[1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0
|
[1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0
|
||||||
[1.0.0]: https://github.com/slackhq/nebula/releases/tag/v1.0.0
|
[1.0.0]: https://github.com/slackhq/nebula/releases/tag/v1.0.0
|
||||||
|
|||||||
100
Makefile
100
Makefile
@@ -1,8 +1,33 @@
|
|||||||
|
GOMINVERSION = 1.17
|
||||||
NEBULA_CMD_PATH = "./cmd/nebula"
|
NEBULA_CMD_PATH = "./cmd/nebula"
|
||||||
BUILD_NUMBER ?= dev+$(shell date -u '+%Y%m%d%H%M%S')
|
|
||||||
GO111MODULE = on
|
GO111MODULE = on
|
||||||
export GO111MODULE
|
export GO111MODULE
|
||||||
|
|
||||||
|
# Set up OS specific bits
|
||||||
|
ifeq ($(OS),Windows_NT)
|
||||||
|
#TODO: we should be able to ditch awk as well
|
||||||
|
GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
|
||||||
|
GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
|
||||||
|
NEBULA_CMD_SUFFIX = .exe
|
||||||
|
NULL_FILE = nul
|
||||||
|
else
|
||||||
|
GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
|
||||||
|
GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
|
||||||
|
NEBULA_CMD_SUFFIX =
|
||||||
|
NULL_FILE = /dev/null
|
||||||
|
endif
|
||||||
|
|
||||||
|
# Only defined the build number if we haven't already
|
||||||
|
ifndef BUILD_NUMBER
|
||||||
|
ifeq ($(shell git describe --exact-match 2>$(NULL_FILE)),)
|
||||||
|
BUILD_NUMBER = $(shell git describe --abbrev=0 --match "v*" | cut -dv -f2)-$(shell git branch --show-current)-$(shell git describe --long --dirty | cut -d- -f2-)
|
||||||
|
else
|
||||||
|
BUILD_NUMBER = $(shell git describe --exact-match --dirty | cut -dv -f2)
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
LDFLAGS = -X main.Build=$(BUILD_NUMBER)
|
||||||
|
|
||||||
ALL_LINUX = linux-amd64 \
|
ALL_LINUX = linux-amd64 \
|
||||||
linux-386 \
|
linux-386 \
|
||||||
linux-ppc64le \
|
linux-ppc64le \
|
||||||
@@ -13,43 +38,73 @@ ALL_LINUX = linux-amd64 \
|
|||||||
linux-mips \
|
linux-mips \
|
||||||
linux-mipsle \
|
linux-mipsle \
|
||||||
linux-mips64 \
|
linux-mips64 \
|
||||||
linux-mips64le
|
linux-mips64le \
|
||||||
|
linux-mips-softfloat \
|
||||||
|
linux-riscv64
|
||||||
|
|
||||||
ALL = $(ALL_LINUX) \
|
ALL = $(ALL_LINUX) \
|
||||||
darwin-amd64 \
|
darwin-amd64 \
|
||||||
|
darwin-arm64 \
|
||||||
|
freebsd-amd64 \
|
||||||
windows-amd64
|
windows-amd64
|
||||||
|
|
||||||
|
e2e:
|
||||||
|
$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e
|
||||||
|
|
||||||
|
e2ev: TEST_FLAGS = -v
|
||||||
|
e2ev: e2e
|
||||||
|
|
||||||
|
e2evv: TEST_ENV += TEST_LOGS=1
|
||||||
|
e2evv: e2ev
|
||||||
|
|
||||||
|
e2evvv: TEST_ENV += TEST_LOGS=2
|
||||||
|
e2evvv: e2ev
|
||||||
|
|
||||||
|
e2evvvv: TEST_ENV += TEST_LOGS=3
|
||||||
|
e2evvvv: e2ev
|
||||||
|
|
||||||
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
|
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
|
||||||
|
|
||||||
release: $(ALL:%=build/nebula-%.tar.gz)
|
release: $(ALL:%=build/nebula-%.tar.gz)
|
||||||
|
|
||||||
release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
|
release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
|
||||||
|
|
||||||
|
release-freebsd: build/nebula-freebsd-amd64.tar.gz
|
||||||
|
|
||||||
|
BUILD_ARGS = -trimpath
|
||||||
|
|
||||||
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
|
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
|
||||||
mv $? .
|
mv $? .
|
||||||
|
|
||||||
bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
|
bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
|
||||||
mv $? .
|
mv $? .
|
||||||
|
|
||||||
|
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
|
||||||
|
mv $? .
|
||||||
|
|
||||||
bin:
|
bin:
|
||||||
go build -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" -o ./nebula ${NEBULA_CMD_PATH}
|
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
|
||||||
go build -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" -o ./nebula-cert ./cmd/nebula-cert
|
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
|
||||||
|
|
||||||
install:
|
install:
|
||||||
go install -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" ${NEBULA_CMD_PATH}
|
go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
|
||||||
go install -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" ./cmd/nebula-cert
|
go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
|
||||||
|
|
||||||
|
build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*))
|
||||||
|
build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
|
||||||
|
|
||||||
|
# Build an extra small binary for mips-softfloat
|
||||||
|
build/linux-mips-softfloat/%: LDFLAGS += -s -w
|
||||||
|
|
||||||
build/%/nebula: .FORCE
|
build/%/nebula: .FORCE
|
||||||
GOOS=$(firstword $(subst -, , $*)) \
|
GOOS=$(firstword $(subst -, , $*)) \
|
||||||
GOARCH=$(word 2, $(subst -, ,$*)) \
|
GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
|
||||||
GOARM=$(word 3, $(subst -, ,$*)) \
|
go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
|
||||||
go build -trimpath -o $@ -ldflags "-X main.Build=$(BUILD_NUMBER)" ${NEBULA_CMD_PATH}
|
|
||||||
|
|
||||||
build/%/nebula-cert: .FORCE
|
build/%/nebula-cert: .FORCE
|
||||||
GOOS=$(firstword $(subst -, , $*)) \
|
GOOS=$(firstword $(subst -, , $*)) \
|
||||||
GOARCH=$(word 2, $(subst -, ,$*)) \
|
GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
|
||||||
GOARM=$(word 3, $(subst -, ,$*)) \
|
go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
|
||||||
go build -trimpath -o $@ -ldflags "-X main.Build=$(BUILD_NUMBER)" ./cmd/nebula-cert
|
|
||||||
|
|
||||||
build/%/nebula.exe: build/%/nebula
|
build/%/nebula.exe: build/%/nebula
|
||||||
mv $< $@
|
mv $< $@
|
||||||
@@ -87,20 +142,29 @@ bench-cpu-long:
|
|||||||
proto: nebula.pb.go cert/cert.pb.go
|
proto: nebula.pb.go cert/cert.pb.go
|
||||||
|
|
||||||
nebula.pb.go: nebula.proto .FORCE
|
nebula.pb.go: nebula.proto .FORCE
|
||||||
go build github.com/golang/protobuf/protoc-gen-go
|
go build github.com/gogo/protobuf/protoc-gen-gogofaster
|
||||||
PATH="$(PWD):$(PATH)" protoc --go_out=. $<
|
PATH="$(CURDIR):$(PATH)" protoc --gogofaster_out=paths=source_relative:. $<
|
||||||
rm protoc-gen-go
|
rm protoc-gen-gogofaster
|
||||||
|
|
||||||
cert/cert.pb.go: cert/cert.proto .FORCE
|
cert/cert.pb.go: cert/cert.proto .FORCE
|
||||||
$(MAKE) -C cert cert.pb.go
|
$(MAKE) -C cert cert.pb.go
|
||||||
|
|
||||||
service:
|
service:
|
||||||
@echo > /dev/null
|
@echo > $(NULL_FILE)
|
||||||
$(eval NEBULA_CMD_PATH := "./cmd/nebula-service")
|
$(eval NEBULA_CMD_PATH := "./cmd/nebula-service")
|
||||||
ifeq ($(words $(MAKECMDGOALS)),1)
|
ifeq ($(words $(MAKECMDGOALS)),1)
|
||||||
$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
|
@$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
|
||||||
|
|
||||||
|
smoke-docker: bin-docker
|
||||||
|
cd .github/workflows/smoke/ && ./build.sh
|
||||||
|
cd .github/workflows/smoke/ && ./smoke.sh
|
||||||
|
|
||||||
|
smoke-docker-race: BUILD_ARGS = -race
|
||||||
|
smoke-docker-race: smoke-docker
|
||||||
|
|
||||||
.FORCE:
|
.FORCE:
|
||||||
.PHONY: test test-cov-html bench bench-cpu bench-cpu-long bin proto release service
|
.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race
|
||||||
.DEFAULT_GOAL := bin
|
.DEFAULT_GOAL := bin
|
||||||
|
|||||||
39
README.md
39
README.md
@@ -1,7 +1,6 @@
|
|||||||
## What is Nebula?
|
## What is Nebula?
|
||||||
Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security.
|
Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security.
|
||||||
It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, and Windows.
|
It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, Windows, iOS, and Android.
|
||||||
(Also: keep this quiet, but we have an early prototype running on iOS).
|
|
||||||
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.
|
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.
|
||||||
|
|
||||||
Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
|
Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
|
||||||
@@ -9,9 +8,39 @@ and tunneling, and each of those individual pieces existed before Nebula in vari
|
|||||||
What makes Nebula different to existing offerings is that it brings all of these ideas together,
|
What makes Nebula different to existing offerings is that it brings all of these ideas together,
|
||||||
resulting in a sum that is greater than its individual parts.
|
resulting in a sum that is greater than its individual parts.
|
||||||
|
|
||||||
|
Further documentation can be found [here](https://www.defined.net/nebula/introduction/).
|
||||||
|
|
||||||
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
||||||
|
|
||||||
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU)
|
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU).
|
||||||
|
|
||||||
|
## Supported Platforms
|
||||||
|
|
||||||
|
#### Desktop and Server
|
||||||
|
|
||||||
|
Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for downloads or see the [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) section.
|
||||||
|
|
||||||
|
- Linux - 64 and 32 bit, arm, and others
|
||||||
|
- Windows
|
||||||
|
- MacOS
|
||||||
|
- Freebsd
|
||||||
|
|
||||||
|
#### Distribution Packages
|
||||||
|
|
||||||
|
- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
|
||||||
|
```
|
||||||
|
$ sudo pacman -S nebula
|
||||||
|
```
|
||||||
|
- [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/)
|
||||||
|
```
|
||||||
|
$ sudo dnf copr enable jdoss/nebula
|
||||||
|
$ sudo dnf install nebula
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Mobile
|
||||||
|
|
||||||
|
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
|
||||||
|
- [Android](https://play.google.com/store/apps/details?id=net.defined.mobile_nebula&pcampaignid=pcampaignidMKT-Other-global-all-co-prtnr-py-PartBadge-Mar2515-1)
|
||||||
|
|
||||||
## Technical Overview
|
## Technical Overview
|
||||||
|
|
||||||
@@ -29,7 +58,7 @@ Nebula was created to provide a mechanism for groups hosts to communicate secure
|
|||||||
|
|
||||||
To set up a Nebula network, you'll need:
|
To set up a Nebula network, you'll need:
|
||||||
|
|
||||||
#### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use.
|
#### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) or [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use.
|
||||||
|
|
||||||
#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.
|
#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.
|
||||||
|
|
||||||
|
|||||||
319
allow_list.go
319
allow_list.go
@@ -2,12 +2,29 @@ package nebula
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type AllowList struct {
|
type AllowList struct {
|
||||||
// The values of this cidrTree are `bool`, signifying allow/deny
|
// The values of this cidrTree are `bool`, signifying allow/deny
|
||||||
cidrTree *CIDRTree
|
cidrTree *cidr.Tree6
|
||||||
|
}
|
||||||
|
|
||||||
|
type RemoteAllowList struct {
|
||||||
|
AllowList *AllowList
|
||||||
|
|
||||||
|
// Inside Range Specific, keys of this tree are inside CIDRs and values
|
||||||
|
// are *AllowList
|
||||||
|
insideAllowLists *cidr.Tree6
|
||||||
|
}
|
||||||
|
|
||||||
|
type LocalAllowList struct {
|
||||||
|
AllowList *AllowList
|
||||||
|
|
||||||
// To avoid ambiguity, all rules must be true, or all rules must be false.
|
// To avoid ambiguity, all rules must be true, or all rules must be false.
|
||||||
nameRules []AllowListNameRule
|
nameRules []AllowListNameRule
|
||||||
@@ -18,7 +35,224 @@ type AllowListNameRule struct {
|
|||||||
Allow bool
|
Allow bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (al *AllowList) Allow(ip uint32) bool {
|
func NewLocalAllowListFromConfig(c *config.C, k string) (*LocalAllowList, error) {
|
||||||
|
var nameRules []AllowListNameRule
|
||||||
|
handleKey := func(key string, value interface{}) (bool, error) {
|
||||||
|
if key == "interfaces" {
|
||||||
|
var err error
|
||||||
|
nameRules, err = getAllowListInterfaces(k, value)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
al, err := newAllowListFromConfig(c, k, handleKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &LocalAllowList{AllowList: al, nameRules: nameRules}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRemoteAllowListFromConfig(c *config.C, k, rangesKey string) (*RemoteAllowList, error) {
|
||||||
|
al, err := newAllowListFromConfig(c, k, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
remoteAllowRanges, err := getRemoteAllowRanges(c, rangesKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &RemoteAllowList{AllowList: al, insideAllowLists: remoteAllowRanges}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the handleKey func returns true, the rest of the parsing is skipped
|
||||||
|
// for this key. This allows parsing of special values like `interfaces`.
|
||||||
|
func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
|
||||||
|
r := c.Get(k)
|
||||||
|
if r == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return newAllowList(k, r, handleKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the handleKey func returns true, the rest of the parsing is skipped
|
||||||
|
// for this key. This allows parsing of special values like `interfaces`.
|
||||||
|
func newAllowList(k string, raw interface{}, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
|
||||||
|
rawMap, ok := raw.(map[interface{}]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
tree := cidr.NewTree6()
|
||||||
|
|
||||||
|
// Keep track of the rules we have added for both ipv4 and ipv6
|
||||||
|
type allowListRules struct {
|
||||||
|
firstValue bool
|
||||||
|
allValuesMatch bool
|
||||||
|
defaultSet bool
|
||||||
|
allValues bool
|
||||||
|
}
|
||||||
|
|
||||||
|
rules4 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
|
||||||
|
rules6 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
|
||||||
|
|
||||||
|
for rawKey, rawValue := range rawMap {
|
||||||
|
rawCIDR, ok := rawKey.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
if handleKey != nil {
|
||||||
|
handled, err := handleKey(rawCIDR, rawValue)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if handled {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
value, ok := rawValue.(bool)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: should we error on duplicate CIDRs in the config?
|
||||||
|
tree.AddCIDR(ipNet, value)
|
||||||
|
|
||||||
|
maskBits, maskSize := ipNet.Mask.Size()
|
||||||
|
|
||||||
|
var rules *allowListRules
|
||||||
|
if maskSize == 32 {
|
||||||
|
rules = &rules4
|
||||||
|
} else {
|
||||||
|
rules = &rules6
|
||||||
|
}
|
||||||
|
|
||||||
|
if rules.firstValue {
|
||||||
|
rules.allValues = value
|
||||||
|
rules.firstValue = false
|
||||||
|
} else {
|
||||||
|
if value != rules.allValues {
|
||||||
|
rules.allValuesMatch = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this is 0.0.0.0/0 or ::/0
|
||||||
|
if maskBits == 0 {
|
||||||
|
rules.defaultSet = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rules4.defaultSet {
|
||||||
|
if rules4.allValuesMatch {
|
||||||
|
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
|
||||||
|
tree.AddCIDR(zeroCIDR, !rules4.allValues)
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rules6.defaultSet {
|
||||||
|
if rules6.allValuesMatch {
|
||||||
|
_, zeroCIDR, _ := net.ParseCIDR("::/0")
|
||||||
|
tree.AddCIDR(zeroCIDR, !rules6.allValues)
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &AllowList{cidrTree: tree}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
|
||||||
|
var nameRules []AllowListNameRule
|
||||||
|
|
||||||
|
rawRules, ok := v.(map[interface{}]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
firstEntry := true
|
||||||
|
var allValues bool
|
||||||
|
for rawName, rawAllow := range rawRules {
|
||||||
|
name, ok := rawName.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
|
||||||
|
}
|
||||||
|
allow, ok := rawAllow.(bool)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
|
||||||
|
}
|
||||||
|
|
||||||
|
nameRE, err := regexp.Compile("^" + name + "$")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config `%s.interfaces` has invalid key: %s: %v", k, name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
nameRules = append(nameRules, AllowListNameRule{
|
||||||
|
Name: nameRE,
|
||||||
|
Allow: allow,
|
||||||
|
})
|
||||||
|
|
||||||
|
if firstEntry {
|
||||||
|
allValues = allow
|
||||||
|
firstEntry = false
|
||||||
|
} else {
|
||||||
|
if allow != allValues {
|
||||||
|
return nil, fmt.Errorf("config `%s.interfaces` values must all be the same true/false value", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nameRules, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
|
||||||
|
value := c.Get(k)
|
||||||
|
if value == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteAllowRanges := cidr.NewTree6()
|
||||||
|
|
||||||
|
rawMap, ok := value.(map[interface{}]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
|
||||||
|
}
|
||||||
|
for rawKey, rawValue := range rawMap {
|
||||||
|
rawCIDR, ok := rawKey.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
allowList, err := newAllowList(fmt.Sprintf("%s.%s", k, rawCIDR), rawValue, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteAllowRanges.AddCIDR(ipNet, allowList)
|
||||||
|
}
|
||||||
|
|
||||||
|
return remoteAllowRanges, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *AllowList) Allow(ip net.IP) bool {
|
||||||
if al == nil {
|
if al == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -32,7 +266,42 @@ func (al *AllowList) Allow(ip uint32) bool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (al *AllowList) AllowName(name string) bool {
|
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
|
||||||
|
if al == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
result := al.cidrTree.MostSpecificContainsIpV4(ip)
|
||||||
|
switch v := result.(type) {
|
||||||
|
case bool:
|
||||||
|
return v
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
|
||||||
|
if al == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
|
||||||
|
switch v := result.(type) {
|
||||||
|
case bool:
|
||||||
|
return v
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *LocalAllowList) Allow(ip net.IP) bool {
|
||||||
|
if al == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return al.AllowList.Allow(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *LocalAllowList) AllowName(name string) bool {
|
||||||
if al == nil || len(al.nameRules) == 0 {
|
if al == nil || len(al.nameRules) == 0 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -46,3 +315,47 @@ func (al *AllowList) AllowName(name string) bool {
|
|||||||
// If no rules match, return the default, which is the inverse of the rules
|
// If no rules match, return the default, which is the inverse of the rules
|
||||||
return !al.nameRules[0].Allow
|
return !al.nameRules[0].Allow
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool {
|
||||||
|
if al == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return al.AllowList.Allow(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool {
|
||||||
|
if !al.getInsideAllowList(vpnIp).Allow(ip) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return al.AllowList.Allow(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool {
|
||||||
|
if al == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return al.AllowList.AllowIpV4(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
|
||||||
|
if al == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return al.AllowList.AllowIpV6(hi, lo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
|
||||||
|
if al.insideAllowLists != nil {
|
||||||
|
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
|
||||||
|
if inside != nil {
|
||||||
|
return inside.(*AllowList)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,31 +5,129 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAllowList_Allow(t *testing.T) {
|
func TestNewAllowListFromConfig(t *testing.T) {
|
||||||
assert.Equal(t, true, ((*AllowList)(nil)).Allow(ip2int(net.ParseIP("1.1.1.1"))))
|
l := util.NewTestLogger()
|
||||||
|
c := config.NewC(l)
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"192.168.0.0": true,
|
||||||
|
}
|
||||||
|
r, err := newAllowListFromConfig(c, "allowlist", nil)
|
||||||
|
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
|
||||||
|
assert.Nil(t, r)
|
||||||
|
|
||||||
tree := NewCIDRTree()
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
tree.AddCIDR(getCIDR("0.0.0.0/0"), true)
|
"192.168.0.0/16": "abc",
|
||||||
tree.AddCIDR(getCIDR("10.0.0.0/8"), false)
|
}
|
||||||
tree.AddCIDR(getCIDR("10.42.42.0/24"), true)
|
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||||
al := &AllowList{cidrTree: tree}
|
assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
|
||||||
|
|
||||||
assert.Equal(t, true, al.Allow(ip2int(net.ParseIP("1.1.1.1"))))
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
assert.Equal(t, false, al.Allow(ip2int(net.ParseIP("10.0.0.4"))))
|
"192.168.0.0/16": true,
|
||||||
assert.Equal(t, true, al.Allow(ip2int(net.ParseIP("10.42.42.42"))))
|
"10.0.0.0/8": false,
|
||||||
|
}
|
||||||
|
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||||
|
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
|
||||||
|
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"0.0.0.0/0": true,
|
||||||
|
"10.0.0.0/8": false,
|
||||||
|
"10.42.42.0/24": true,
|
||||||
|
"fd00::/8": true,
|
||||||
|
"fd00:fd00::/16": false,
|
||||||
|
}
|
||||||
|
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||||
|
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
|
||||||
|
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"0.0.0.0/0": true,
|
||||||
|
"10.0.0.0/8": false,
|
||||||
|
"10.42.42.0/24": true,
|
||||||
|
}
|
||||||
|
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||||
|
if assert.NoError(t, err) {
|
||||||
|
assert.NotNil(t, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"0.0.0.0/0": true,
|
||||||
|
"10.0.0.0/8": false,
|
||||||
|
"10.42.42.0/24": true,
|
||||||
|
"::/0": false,
|
||||||
|
"fd00::/8": true,
|
||||||
|
"fd00:fd00::/16": false,
|
||||||
|
}
|
||||||
|
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||||
|
if assert.NoError(t, err) {
|
||||||
|
assert.NotNil(t, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test interface names
|
||||||
|
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"interfaces": map[interface{}]interface{}{
|
||||||
|
`docker.*`: "foo",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
lr, err := NewLocalAllowListFromConfig(c, "allowlist")
|
||||||
|
assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
|
||||||
|
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"interfaces": map[interface{}]interface{}{
|
||||||
|
`docker.*`: false,
|
||||||
|
`eth.*`: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
|
||||||
|
assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
|
||||||
|
|
||||||
|
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||||
|
"interfaces": map[interface{}]interface{}{
|
||||||
|
`docker.*`: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
|
||||||
|
if assert.NoError(t, err) {
|
||||||
|
assert.NotNil(t, lr)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAllowList_AllowName(t *testing.T) {
|
func TestAllowList_Allow(t *testing.T) {
|
||||||
assert.Equal(t, true, ((*AllowList)(nil)).AllowName("docker0"))
|
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
|
||||||
|
|
||||||
|
tree := cidr.NewTree6()
|
||||||
|
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
|
||||||
|
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
|
||||||
|
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
|
||||||
|
tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true)
|
||||||
|
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true)
|
||||||
|
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false)
|
||||||
|
tree.AddCIDR(cidr.Parse("::1/128"), true)
|
||||||
|
tree.AddCIDR(cidr.Parse("::2/128"), false)
|
||||||
|
al := &AllowList{cidrTree: tree}
|
||||||
|
|
||||||
|
assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1")))
|
||||||
|
assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4")))
|
||||||
|
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42")))
|
||||||
|
assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41")))
|
||||||
|
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1")))
|
||||||
|
assert.Equal(t, true, al.Allow(net.ParseIP("::1")))
|
||||||
|
assert.Equal(t, false, al.Allow(net.ParseIP("::2")))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLocalAllowList_AllowName(t *testing.T) {
|
||||||
|
assert.Equal(t, true, ((*LocalAllowList)(nil)).AllowName("docker0"))
|
||||||
|
|
||||||
rules := []AllowListNameRule{
|
rules := []AllowListNameRule{
|
||||||
{Name: regexp.MustCompile("^docker.*$"), Allow: false},
|
{Name: regexp.MustCompile("^docker.*$"), Allow: false},
|
||||||
{Name: regexp.MustCompile("^tun.*$"), Allow: false},
|
{Name: regexp.MustCompile("^tun.*$"), Allow: false},
|
||||||
}
|
}
|
||||||
al := &AllowList{nameRules: rules}
|
al := &LocalAllowList{nameRules: rules}
|
||||||
|
|
||||||
assert.Equal(t, false, al.AllowName("docker0"))
|
assert.Equal(t, false, al.AllowName("docker0"))
|
||||||
assert.Equal(t, false, al.AllowName("tun0"))
|
assert.Equal(t, false, al.AllowName("tun0"))
|
||||||
@@ -39,7 +137,7 @@ func TestAllowList_AllowName(t *testing.T) {
|
|||||||
{Name: regexp.MustCompile("^eth.*$"), Allow: true},
|
{Name: regexp.MustCompile("^eth.*$"), Allow: true},
|
||||||
{Name: regexp.MustCompile("^ens.*$"), Allow: true},
|
{Name: regexp.MustCompile("^ens.*$"), Allow: true},
|
||||||
}
|
}
|
||||||
al = &AllowList{nameRules: rules}
|
al = &LocalAllowList{nameRules: rules}
|
||||||
|
|
||||||
assert.Equal(t, false, al.AllowName("docker0"))
|
assert.Equal(t, false, al.AllowName("docker0"))
|
||||||
assert.Equal(t, true, al.AllowName("eth0"))
|
assert.Equal(t, true, al.AllowName("eth0"))
|
||||||
|
|||||||
4
bits.go
4
bits.go
@@ -26,7 +26,7 @@ func NewBits(bits uint64) *Bits {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Bits) Check(i uint64) bool {
|
func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool {
|
||||||
// If i is the next number, return true.
|
// If i is the next number, return true.
|
||||||
if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) {
|
if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) {
|
||||||
return true
|
return true
|
||||||
@@ -47,7 +47,7 @@ func (b *Bits) Check(i uint64) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Bits) Update(i uint64) bool {
|
func (b *Bits) Update(l *logrus.Logger, i uint64) bool {
|
||||||
// If i is the next number, return true and update current.
|
// If i is the next number, return true and update current.
|
||||||
if i == b.current+1 {
|
if i == b.current+1 {
|
||||||
// Report missed packets, we can only understand what was missed after the first window has been gone through
|
// Report missed packets, we can only understand what was missed after the first window has been gone through
|
||||||
|
|||||||
159
bits_test.go
159
bits_test.go
@@ -3,10 +3,12 @@ package nebula
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBits(t *testing.T) {
|
func TestBits(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
b := NewBits(10)
|
b := NewBits(10)
|
||||||
|
|
||||||
// make sure it is the right size
|
// make sure it is the right size
|
||||||
@@ -14,46 +16,46 @@ func TestBits(t *testing.T) {
|
|||||||
|
|
||||||
// This is initialized to zero - receive one. This should work.
|
// This is initialized to zero - receive one. This should work.
|
||||||
|
|
||||||
assert.True(t, b.Check(1))
|
assert.True(t, b.Check(l, 1))
|
||||||
u := b.Update(1)
|
u := b.Update(l, 1)
|
||||||
assert.True(t, u)
|
assert.True(t, u)
|
||||||
assert.EqualValues(t, 1, b.current)
|
assert.EqualValues(t, 1, b.current)
|
||||||
g := []bool{false, true, false, false, false, false, false, false, false, false}
|
g := []bool{false, true, false, false, false, false, false, false, false, false}
|
||||||
assert.Equal(t, g, b.bits)
|
assert.Equal(t, g, b.bits)
|
||||||
|
|
||||||
// Receive two
|
// Receive two
|
||||||
assert.True(t, b.Check(2))
|
assert.True(t, b.Check(l, 2))
|
||||||
u = b.Update(2)
|
u = b.Update(l, 2)
|
||||||
assert.True(t, u)
|
assert.True(t, u)
|
||||||
assert.EqualValues(t, 2, b.current)
|
assert.EqualValues(t, 2, b.current)
|
||||||
g = []bool{false, true, true, false, false, false, false, false, false, false}
|
g = []bool{false, true, true, false, false, false, false, false, false, false}
|
||||||
assert.Equal(t, g, b.bits)
|
assert.Equal(t, g, b.bits)
|
||||||
|
|
||||||
// Receive two again - it will fail
|
// Receive two again - it will fail
|
||||||
assert.False(t, b.Check(2))
|
assert.False(t, b.Check(l, 2))
|
||||||
u = b.Update(2)
|
u = b.Update(l, 2)
|
||||||
assert.False(t, u)
|
assert.False(t, u)
|
||||||
assert.EqualValues(t, 2, b.current)
|
assert.EqualValues(t, 2, b.current)
|
||||||
|
|
||||||
// Jump ahead to 15, which should clear everything and set the 6th element
|
// Jump ahead to 15, which should clear everything and set the 6th element
|
||||||
assert.True(t, b.Check(15))
|
assert.True(t, b.Check(l, 15))
|
||||||
u = b.Update(15)
|
u = b.Update(l, 15)
|
||||||
assert.True(t, u)
|
assert.True(t, u)
|
||||||
assert.EqualValues(t, 15, b.current)
|
assert.EqualValues(t, 15, b.current)
|
||||||
g = []bool{false, false, false, false, false, true, false, false, false, false}
|
g = []bool{false, false, false, false, false, true, false, false, false, false}
|
||||||
assert.Equal(t, g, b.bits)
|
assert.Equal(t, g, b.bits)
|
||||||
|
|
||||||
// Mark 14, which is allowed because it is in the window
|
// Mark 14, which is allowed because it is in the window
|
||||||
assert.True(t, b.Check(14))
|
assert.True(t, b.Check(l, 14))
|
||||||
u = b.Update(14)
|
u = b.Update(l, 14)
|
||||||
assert.True(t, u)
|
assert.True(t, u)
|
||||||
assert.EqualValues(t, 15, b.current)
|
assert.EqualValues(t, 15, b.current)
|
||||||
g = []bool{false, false, false, false, true, true, false, false, false, false}
|
g = []bool{false, false, false, false, true, true, false, false, false, false}
|
||||||
assert.Equal(t, g, b.bits)
|
assert.Equal(t, g, b.bits)
|
||||||
|
|
||||||
// Mark 5, which is not allowed because it is not in the window
|
// Mark 5, which is not allowed because it is not in the window
|
||||||
assert.False(t, b.Check(5))
|
assert.False(t, b.Check(l, 5))
|
||||||
u = b.Update(5)
|
u = b.Update(l, 5)
|
||||||
assert.False(t, u)
|
assert.False(t, u)
|
||||||
assert.EqualValues(t, 15, b.current)
|
assert.EqualValues(t, 15, b.current)
|
||||||
g = []bool{false, false, false, false, true, true, false, false, false, false}
|
g = []bool{false, false, false, false, true, true, false, false, false, false}
|
||||||
@@ -61,63 +63,65 @@ func TestBits(t *testing.T) {
|
|||||||
|
|
||||||
// make sure we handle wrapping around once to the current position
|
// make sure we handle wrapping around once to the current position
|
||||||
b = NewBits(10)
|
b = NewBits(10)
|
||||||
assert.True(t, b.Update(1))
|
assert.True(t, b.Update(l, 1))
|
||||||
assert.True(t, b.Update(11))
|
assert.True(t, b.Update(l, 11))
|
||||||
assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits)
|
assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits)
|
||||||
|
|
||||||
// Walk through a few windows in order
|
// Walk through a few windows in order
|
||||||
b = NewBits(10)
|
b = NewBits(10)
|
||||||
for i := uint64(0); i <= 100; i++ {
|
for i := uint64(0); i <= 100; i++ {
|
||||||
assert.True(t, b.Check(i), "Error while checking %v", i)
|
assert.True(t, b.Check(l, i), "Error while checking %v", i)
|
||||||
assert.True(t, b.Update(i), "Error while updating %v", i)
|
assert.True(t, b.Update(l, i), "Error while updating %v", i)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBitsDupeCounter(t *testing.T) {
|
func TestBitsDupeCounter(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
b := NewBits(10)
|
b := NewBits(10)
|
||||||
b.lostCounter.Clear()
|
b.lostCounter.Clear()
|
||||||
b.dupeCounter.Clear()
|
b.dupeCounter.Clear()
|
||||||
b.outOfWindowCounter.Clear()
|
b.outOfWindowCounter.Clear()
|
||||||
|
|
||||||
assert.True(t, b.Update(1))
|
assert.True(t, b.Update(l, 1))
|
||||||
assert.Equal(t, int64(0), b.dupeCounter.Count())
|
assert.Equal(t, int64(0), b.dupeCounter.Count())
|
||||||
|
|
||||||
assert.False(t, b.Update(1))
|
assert.False(t, b.Update(l, 1))
|
||||||
assert.Equal(t, int64(1), b.dupeCounter.Count())
|
assert.Equal(t, int64(1), b.dupeCounter.Count())
|
||||||
|
|
||||||
assert.True(t, b.Update(2))
|
assert.True(t, b.Update(l, 2))
|
||||||
assert.Equal(t, int64(1), b.dupeCounter.Count())
|
assert.Equal(t, int64(1), b.dupeCounter.Count())
|
||||||
|
|
||||||
assert.True(t, b.Update(3))
|
assert.True(t, b.Update(l, 3))
|
||||||
assert.Equal(t, int64(1), b.dupeCounter.Count())
|
assert.Equal(t, int64(1), b.dupeCounter.Count())
|
||||||
|
|
||||||
assert.False(t, b.Update(1))
|
assert.False(t, b.Update(l, 1))
|
||||||
assert.Equal(t, int64(0), b.lostCounter.Count())
|
assert.Equal(t, int64(0), b.lostCounter.Count())
|
||||||
assert.Equal(t, int64(2), b.dupeCounter.Count())
|
assert.Equal(t, int64(2), b.dupeCounter.Count())
|
||||||
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBitsOutOfWindowCounter(t *testing.T) {
|
func TestBitsOutOfWindowCounter(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
b := NewBits(10)
|
b := NewBits(10)
|
||||||
b.lostCounter.Clear()
|
b.lostCounter.Clear()
|
||||||
b.dupeCounter.Clear()
|
b.dupeCounter.Clear()
|
||||||
b.outOfWindowCounter.Clear()
|
b.outOfWindowCounter.Clear()
|
||||||
|
|
||||||
assert.True(t, b.Update(20))
|
assert.True(t, b.Update(l, 20))
|
||||||
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
||||||
|
|
||||||
assert.True(t, b.Update(21))
|
assert.True(t, b.Update(l, 21))
|
||||||
assert.True(t, b.Update(22))
|
assert.True(t, b.Update(l, 22))
|
||||||
assert.True(t, b.Update(23))
|
assert.True(t, b.Update(l, 23))
|
||||||
assert.True(t, b.Update(24))
|
assert.True(t, b.Update(l, 24))
|
||||||
assert.True(t, b.Update(25))
|
assert.True(t, b.Update(l, 25))
|
||||||
assert.True(t, b.Update(26))
|
assert.True(t, b.Update(l, 26))
|
||||||
assert.True(t, b.Update(27))
|
assert.True(t, b.Update(l, 27))
|
||||||
assert.True(t, b.Update(28))
|
assert.True(t, b.Update(l, 28))
|
||||||
assert.True(t, b.Update(29))
|
assert.True(t, b.Update(l, 29))
|
||||||
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
||||||
|
|
||||||
assert.False(t, b.Update(0))
|
assert.False(t, b.Update(l, 0))
|
||||||
assert.Equal(t, int64(1), b.outOfWindowCounter.Count())
|
assert.Equal(t, int64(1), b.outOfWindowCounter.Count())
|
||||||
|
|
||||||
//tODO: make sure lostcounter doesn't increase in orderly increment
|
//tODO: make sure lostcounter doesn't increase in orderly increment
|
||||||
@@ -127,23 +131,24 @@ func TestBitsOutOfWindowCounter(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBitsLostCounter(t *testing.T) {
|
func TestBitsLostCounter(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
b := NewBits(10)
|
b := NewBits(10)
|
||||||
b.lostCounter.Clear()
|
b.lostCounter.Clear()
|
||||||
b.dupeCounter.Clear()
|
b.dupeCounter.Clear()
|
||||||
b.outOfWindowCounter.Clear()
|
b.outOfWindowCounter.Clear()
|
||||||
|
|
||||||
//assert.True(t, b.Update(0))
|
//assert.True(t, b.Update(0))
|
||||||
assert.True(t, b.Update(0))
|
assert.True(t, b.Update(l, 0))
|
||||||
assert.True(t, b.Update(20))
|
assert.True(t, b.Update(l, 20))
|
||||||
assert.True(t, b.Update(21))
|
assert.True(t, b.Update(l, 21))
|
||||||
assert.True(t, b.Update(22))
|
assert.True(t, b.Update(l, 22))
|
||||||
assert.True(t, b.Update(23))
|
assert.True(t, b.Update(l, 23))
|
||||||
assert.True(t, b.Update(24))
|
assert.True(t, b.Update(l, 24))
|
||||||
assert.True(t, b.Update(25))
|
assert.True(t, b.Update(l, 25))
|
||||||
assert.True(t, b.Update(26))
|
assert.True(t, b.Update(l, 26))
|
||||||
assert.True(t, b.Update(27))
|
assert.True(t, b.Update(l, 27))
|
||||||
assert.True(t, b.Update(28))
|
assert.True(t, b.Update(l, 28))
|
||||||
assert.True(t, b.Update(29))
|
assert.True(t, b.Update(l, 29))
|
||||||
assert.Equal(t, int64(20), b.lostCounter.Count())
|
assert.Equal(t, int64(20), b.lostCounter.Count())
|
||||||
assert.Equal(t, int64(0), b.dupeCounter.Count())
|
assert.Equal(t, int64(0), b.dupeCounter.Count())
|
||||||
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
|
||||||
@@ -153,56 +158,56 @@ func TestBitsLostCounter(t *testing.T) {
|
|||||||
b.dupeCounter.Clear()
|
b.dupeCounter.Clear()
|
||||||
b.outOfWindowCounter.Clear()
|
b.outOfWindowCounter.Clear()
|
||||||
|
|
||||||
assert.True(t, b.Update(0))
|
assert.True(t, b.Update(l, 0))
|
||||||
assert.Equal(t, int64(0), b.lostCounter.Count())
|
assert.Equal(t, int64(0), b.lostCounter.Count())
|
||||||
assert.True(t, b.Update(9))
|
assert.True(t, b.Update(l, 9))
|
||||||
assert.Equal(t, int64(0), b.lostCounter.Count())
|
assert.Equal(t, int64(0), b.lostCounter.Count())
|
||||||
// 10 will set 0 index, 0 was already set, no lost packets
|
// 10 will set 0 index, 0 was already set, no lost packets
|
||||||
assert.True(t, b.Update(10))
|
assert.True(t, b.Update(l, 10))
|
||||||
assert.Equal(t, int64(0), b.lostCounter.Count())
|
assert.Equal(t, int64(0), b.lostCounter.Count())
|
||||||
// 11 will set 1 index, 1 was missed, we should see 1 packet lost
|
// 11 will set 1 index, 1 was missed, we should see 1 packet lost
|
||||||
assert.True(t, b.Update(11))
|
assert.True(t, b.Update(l, 11))
|
||||||
assert.Equal(t, int64(1), b.lostCounter.Count())
|
assert.Equal(t, int64(1), b.lostCounter.Count())
|
||||||
// Now let's fill in the window, should end up with 8 lost packets
|
// Now let's fill in the window, should end up with 8 lost packets
|
||||||
assert.True(t, b.Update(12))
|
assert.True(t, b.Update(l, 12))
|
||||||
assert.True(t, b.Update(13))
|
assert.True(t, b.Update(l, 13))
|
||||||
assert.True(t, b.Update(14))
|
assert.True(t, b.Update(l, 14))
|
||||||
assert.True(t, b.Update(15))
|
assert.True(t, b.Update(l, 15))
|
||||||
assert.True(t, b.Update(16))
|
assert.True(t, b.Update(l, 16))
|
||||||
assert.True(t, b.Update(17))
|
assert.True(t, b.Update(l, 17))
|
||||||
assert.True(t, b.Update(18))
|
assert.True(t, b.Update(l, 18))
|
||||||
assert.True(t, b.Update(19))
|
assert.True(t, b.Update(l, 19))
|
||||||
assert.Equal(t, int64(8), b.lostCounter.Count())
|
assert.Equal(t, int64(8), b.lostCounter.Count())
|
||||||
|
|
||||||
// Jump ahead by a window size
|
// Jump ahead by a window size
|
||||||
assert.True(t, b.Update(29))
|
assert.True(t, b.Update(l, 29))
|
||||||
assert.Equal(t, int64(8), b.lostCounter.Count())
|
assert.Equal(t, int64(8), b.lostCounter.Count())
|
||||||
// Now lets walk ahead normally through the window, the missed packets should fill in
|
// Now lets walk ahead normally through the window, the missed packets should fill in
|
||||||
assert.True(t, b.Update(30))
|
assert.True(t, b.Update(l, 30))
|
||||||
assert.True(t, b.Update(31))
|
assert.True(t, b.Update(l, 31))
|
||||||
assert.True(t, b.Update(32))
|
assert.True(t, b.Update(l, 32))
|
||||||
assert.True(t, b.Update(33))
|
assert.True(t, b.Update(l, 33))
|
||||||
assert.True(t, b.Update(34))
|
assert.True(t, b.Update(l, 34))
|
||||||
assert.True(t, b.Update(35))
|
assert.True(t, b.Update(l, 35))
|
||||||
assert.True(t, b.Update(36))
|
assert.True(t, b.Update(l, 36))
|
||||||
assert.True(t, b.Update(37))
|
assert.True(t, b.Update(l, 37))
|
||||||
assert.True(t, b.Update(38))
|
assert.True(t, b.Update(l, 38))
|
||||||
// 39 packets tracked, 22 seen, 17 lost
|
// 39 packets tracked, 22 seen, 17 lost
|
||||||
assert.Equal(t, int64(17), b.lostCounter.Count())
|
assert.Equal(t, int64(17), b.lostCounter.Count())
|
||||||
|
|
||||||
// Jump ahead by 2 windows, should have recording 1 full window missing
|
// Jump ahead by 2 windows, should have recording 1 full window missing
|
||||||
assert.True(t, b.Update(58))
|
assert.True(t, b.Update(l, 58))
|
||||||
assert.Equal(t, int64(27), b.lostCounter.Count())
|
assert.Equal(t, int64(27), b.lostCounter.Count())
|
||||||
// Now lets walk ahead normally through the window, the missed packets should fill in from this window
|
// Now lets walk ahead normally through the window, the missed packets should fill in from this window
|
||||||
assert.True(t, b.Update(59))
|
assert.True(t, b.Update(l, 59))
|
||||||
assert.True(t, b.Update(60))
|
assert.True(t, b.Update(l, 60))
|
||||||
assert.True(t, b.Update(61))
|
assert.True(t, b.Update(l, 61))
|
||||||
assert.True(t, b.Update(62))
|
assert.True(t, b.Update(l, 62))
|
||||||
assert.True(t, b.Update(63))
|
assert.True(t, b.Update(l, 63))
|
||||||
assert.True(t, b.Update(64))
|
assert.True(t, b.Update(l, 64))
|
||||||
assert.True(t, b.Update(65))
|
assert.True(t, b.Update(l, 65))
|
||||||
assert.True(t, b.Update(66))
|
assert.True(t, b.Update(l, 66))
|
||||||
assert.True(t, b.Update(67))
|
assert.True(t, b.Update(l, 67))
|
||||||
// 68 packets tracked, 32 seen, 36 missed
|
// 68 packets tracked, 32 seen, 36 missed
|
||||||
assert.Equal(t, int64(36), b.lostCounter.Count())
|
assert.Equal(t, int64(36), b.lostCounter.Count())
|
||||||
assert.Equal(t, int64(0), b.dupeCounter.Count())
|
assert.Equal(t, int64(0), b.dupeCounter.Count())
|
||||||
@@ -212,10 +217,10 @@ func TestBitsLostCounter(t *testing.T) {
|
|||||||
func BenchmarkBits(b *testing.B) {
|
func BenchmarkBits(b *testing.B) {
|
||||||
z := NewBits(10)
|
z := NewBits(10)
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
for i, _ := range z.bits {
|
for i := range z.bits {
|
||||||
z.bits[i] = true
|
z.bits[i] = true
|
||||||
}
|
}
|
||||||
for i, _ := range z.bits {
|
for i := range z.bits {
|
||||||
z.bits[i] = false
|
z.bits[i] = false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
20
cert.go
20
cert.go
@@ -7,11 +7,11 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
var trustedCAs *cert.NebulaCAPool
|
|
||||||
|
|
||||||
type CertState struct {
|
type CertState struct {
|
||||||
certificate *cert.NebulaCertificate
|
certificate *cert.NebulaCertificate
|
||||||
rawCertificate []byte
|
rawCertificate []byte
|
||||||
@@ -46,7 +46,7 @@ func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*Cert
|
|||||||
return cs, nil
|
return cs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewCertStateFromConfig(c *Config) (*CertState, error) {
|
func NewCertStateFromConfig(c *config.C) (*CertState, error) {
|
||||||
var pemPrivateKey []byte
|
var pemPrivateKey []byte
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@@ -119,7 +119,7 @@ func NewCertStateFromConfig(c *Config) (*CertState, error) {
|
|||||||
return NewCertState(nebulaCert, rawKey)
|
return NewCertState(nebulaCert, rawKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadCAFromConfig(c *Config) (*cert.NebulaCAPool, error) {
|
func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
|
||||||
var rawCA []byte
|
var rawCA []byte
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@@ -149,10 +149,16 @@ func loadCAFromConfig(c *Config) (*cert.NebulaCAPool, error) {
|
|||||||
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
|
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// pki.blacklist entered the scene at about the same time we aliased x509 to pki, not supporting backwards compat
|
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
|
||||||
|
l.WithField("fingerprint", fp).Infof("Blocklisting cert")
|
||||||
|
CAs.BlocklistFingerprint(fp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Support deprecated config for at leaast one minor release to allow for migrations
|
||||||
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
|
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
|
||||||
l.WithField("fingerprint", fp).Infof("Blacklisting cert")
|
l.WithField("fingerprint", fp).Infof("Blocklisting cert")
|
||||||
CAs.BlacklistFingerprint(fp)
|
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
|
||||||
|
CAs.BlocklistFingerprint(fp)
|
||||||
}
|
}
|
||||||
|
|
||||||
return CAs, nil
|
return CAs, nil
|
||||||
|
|||||||
@@ -2,8 +2,8 @@ GO111MODULE = on
|
|||||||
export GO111MODULE
|
export GO111MODULE
|
||||||
|
|
||||||
cert.pb.go: cert.proto .FORCE
|
cert.pb.go: cert.proto .FORCE
|
||||||
go build github.com/golang/protobuf/protoc-gen-go
|
go build google.golang.org/protobuf/cmd/protoc-gen-go
|
||||||
PATH="$(PWD):$(PATH)" protoc --go_out=. $<
|
PATH="$(CURDIR):$(PATH)" protoc --go_out=. --go_opt=paths=source_relative $<
|
||||||
rm protoc-gen-go
|
rm protoc-gen-go
|
||||||
|
|
||||||
.FORCE:
|
.FORCE:
|
||||||
|
|||||||
22
cert/ca.go
22
cert/ca.go
@@ -8,14 +8,14 @@ import (
|
|||||||
|
|
||||||
type NebulaCAPool struct {
|
type NebulaCAPool struct {
|
||||||
CAs map[string]*NebulaCertificate
|
CAs map[string]*NebulaCertificate
|
||||||
certBlacklist map[string]struct{}
|
certBlocklist map[string]struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCAPool creates a CAPool
|
// NewCAPool creates a CAPool
|
||||||
func NewCAPool() *NebulaCAPool {
|
func NewCAPool() *NebulaCAPool {
|
||||||
ca := NebulaCAPool{
|
ca := NebulaCAPool{
|
||||||
CAs: make(map[string]*NebulaCertificate),
|
CAs: make(map[string]*NebulaCertificate),
|
||||||
certBlacklist: make(map[string]struct{}),
|
certBlocklist: make(map[string]struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
return &ca
|
return &ca
|
||||||
@@ -67,24 +67,24 @@ func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
|
|||||||
return pemBytes, nil
|
return pemBytes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlacklistFingerprint adds a cert fingerprint to the blacklist
|
// BlocklistFingerprint adds a cert fingerprint to the blocklist
|
||||||
func (ncp *NebulaCAPool) BlacklistFingerprint(f string) {
|
func (ncp *NebulaCAPool) BlocklistFingerprint(f string) {
|
||||||
ncp.certBlacklist[f] = struct{}{}
|
ncp.certBlocklist[f] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResetCertBlacklist removes all previously blacklisted cert fingerprints
|
// ResetCertBlocklist removes all previously blocklisted cert fingerprints
|
||||||
func (ncp *NebulaCAPool) ResetCertBlacklist() {
|
func (ncp *NebulaCAPool) ResetCertBlocklist() {
|
||||||
ncp.certBlacklist = make(map[string]struct{})
|
ncp.certBlocklist = make(map[string]struct{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsBlacklisted returns true if the fingerprint fails to generate or has been explicitly blacklisted
|
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
|
||||||
func (ncp *NebulaCAPool) IsBlacklisted(c *NebulaCertificate) bool {
|
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
|
||||||
h, err := c.Sha256Sum()
|
h, err := c.Sha256Sum()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := ncp.certBlacklist[h]; ok {
|
if _, ok := ncp.certBlocklist[h]; ok {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
96
cert/cert.go
96
cert/cert.go
@@ -1,18 +1,18 @@
|
|||||||
package cert
|
package cert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"crypto"
|
"crypto"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"golang.org/x/crypto/curve25519"
|
"golang.org/x/crypto/curve25519"
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
@@ -61,6 +61,10 @@ func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if rc.Details == nil {
|
||||||
|
return nil, fmt.Errorf("encoded Details was nil")
|
||||||
|
}
|
||||||
|
|
||||||
if len(rc.Details.Ips)%2 != 0 {
|
if len(rc.Details.Ips)%2 != 0 {
|
||||||
return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
|
return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
|
||||||
}
|
}
|
||||||
@@ -123,6 +127,9 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
|
|||||||
if p == nil {
|
if p == nil {
|
||||||
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||||
}
|
}
|
||||||
|
if p.Type != CertBanner {
|
||||||
|
return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
|
||||||
|
}
|
||||||
nc, err := UnmarshalNebulaCertificate(p.Bytes)
|
nc, err := UnmarshalNebulaCertificate(p.Bytes)
|
||||||
return nc, r, err
|
return nc, r, err
|
||||||
}
|
}
|
||||||
@@ -244,10 +251,10 @@ func (nc *NebulaCertificate) Expired(t time.Time) bool {
|
|||||||
return nc.Details.NotBefore.After(t) || nc.Details.NotAfter.Before(t)
|
return nc.Details.NotBefore.After(t) || nc.Details.NotAfter.Before(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blacklist, etc)
|
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
|
||||||
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
|
||||||
if ncp.IsBlacklisted(nc) {
|
if ncp.IsBlocklisted(nc) {
|
||||||
return false, fmt.Errorf("certificate has been blacklisted")
|
return false, fmt.Errorf("certificate has been blocked")
|
||||||
}
|
}
|
||||||
|
|
||||||
signer, err := ncp.GetCAForCert(nc)
|
signer, err := ncp.GetCAForCert(nc)
|
||||||
@@ -318,12 +325,26 @@ func (nc *NebulaCertificate) CheckRootConstrains(signer *NebulaCertificate) erro
|
|||||||
|
|
||||||
// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
|
// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
|
||||||
func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
|
func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
|
||||||
var dst, key32 [32]byte
|
if nc.Details.IsCA {
|
||||||
copy(key32[:], key)
|
// the call to PublicKey below will panic slice bounds out of range otherwise
|
||||||
curve25519.ScalarBaseMult(&dst, &key32)
|
if len(key) != ed25519.PrivateKeySize {
|
||||||
if !bytes.Equal(dst[:], nc.Details.PublicKey) {
|
return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
|
||||||
return fmt.Errorf("public key in cert and private key supplied don't match")
|
return fmt.Errorf("public key in cert and private key supplied don't match")
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
pub, err := curve25519.X25519(key, curve25519.Basepoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !bytes.Equal(pub, nc.Details.PublicKey) {
|
||||||
|
return fmt.Errorf("public key in cert and private key supplied don't match")
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -468,6 +489,63 @@ func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
|
|||||||
return json.Marshal(jc)
|
return json.Marshal(jc)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//func (nc *NebulaCertificate) Copy() *NebulaCertificate {
|
||||||
|
// r, err := nc.Marshal()
|
||||||
|
// if err != nil {
|
||||||
|
// //TODO
|
||||||
|
// return nil
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// c, err := UnmarshalNebulaCertificate(r)
|
||||||
|
// return c
|
||||||
|
//}
|
||||||
|
|
||||||
|
func (nc *NebulaCertificate) Copy() *NebulaCertificate {
|
||||||
|
c := &NebulaCertificate{
|
||||||
|
Details: NebulaCertificateDetails{
|
||||||
|
Name: nc.Details.Name,
|
||||||
|
Groups: make([]string, len(nc.Details.Groups)),
|
||||||
|
Ips: make([]*net.IPNet, len(nc.Details.Ips)),
|
||||||
|
Subnets: make([]*net.IPNet, len(nc.Details.Subnets)),
|
||||||
|
NotBefore: nc.Details.NotBefore,
|
||||||
|
NotAfter: nc.Details.NotAfter,
|
||||||
|
PublicKey: make([]byte, len(nc.Details.PublicKey)),
|
||||||
|
IsCA: nc.Details.IsCA,
|
||||||
|
Issuer: nc.Details.Issuer,
|
||||||
|
InvertedGroups: make(map[string]struct{}, len(nc.Details.InvertedGroups)),
|
||||||
|
},
|
||||||
|
Signature: make([]byte, len(nc.Signature)),
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(c.Signature, nc.Signature)
|
||||||
|
copy(c.Details.Groups, nc.Details.Groups)
|
||||||
|
copy(c.Details.PublicKey, nc.Details.PublicKey)
|
||||||
|
|
||||||
|
for i, p := range nc.Details.Ips {
|
||||||
|
c.Details.Ips[i] = &net.IPNet{
|
||||||
|
IP: make(net.IP, len(p.IP)),
|
||||||
|
Mask: make(net.IPMask, len(p.Mask)),
|
||||||
|
}
|
||||||
|
copy(c.Details.Ips[i].IP, p.IP)
|
||||||
|
copy(c.Details.Ips[i].Mask, p.Mask)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, p := range nc.Details.Subnets {
|
||||||
|
c.Details.Subnets[i] = &net.IPNet{
|
||||||
|
IP: make(net.IP, len(p.IP)),
|
||||||
|
Mask: make(net.IPMask, len(p.Mask)),
|
||||||
|
}
|
||||||
|
copy(c.Details.Subnets[i].IP, p.IP)
|
||||||
|
copy(c.Details.Subnets[i].Mask, p.Mask)
|
||||||
|
}
|
||||||
|
|
||||||
|
for g := range nc.Details.InvertedGroups {
|
||||||
|
c.Details.InvertedGroups[g] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
func netMatch(certIp *net.IPNet, rootIps []*net.IPNet) bool {
|
func netMatch(certIp *net.IPNet, rootIps []*net.IPNet) bool {
|
||||||
for _, net := range rootIps {
|
for _, net := range rootIps {
|
||||||
if net.Contains(certIp.IP) && maskContains(net.Mask, certIp.Mask) {
|
if net.Contains(certIp.IP) && maskContains(net.Mask, certIp.Mask) {
|
||||||
|
|||||||
354
cert/cert.pb.go
354
cert/cert.pb.go
@@ -1,202 +1,298 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.26.0
|
||||||
|
// protoc v3.14.0
|
||||||
// source: cert.proto
|
// source: cert.proto
|
||||||
|
|
||||||
package cert
|
package cert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
const (
|
||||||
var _ = proto.Marshal
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
var _ = fmt.Errorf
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
var _ = math.Inf
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
)
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
type RawNebulaCertificate struct {
|
type RawNebulaCertificate struct {
|
||||||
Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,json=details,proto3" json:"Details,omitempty"`
|
state protoimpl.MessageState
|
||||||
Signature []byte `protobuf:"bytes,2,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty"`
|
sizeCache protoimpl.SizeCache
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
unknownFields protoimpl.UnknownFields
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,proto3" json:"Details,omitempty"`
|
||||||
|
Signature []byte `protobuf:"bytes,2,opt,name=Signature,proto3" json:"Signature,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificate) Reset() {
|
||||||
|
*x = RawNebulaCertificate{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificate) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificate) Reset() { *m = RawNebulaCertificate{} }
|
|
||||||
func (m *RawNebulaCertificate) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*RawNebulaCertificate) ProtoMessage() {}
|
func (*RawNebulaCertificate) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaCertificate.ProtoReflect.Descriptor instead.
|
||||||
func (*RawNebulaCertificate) Descriptor() ([]byte, []int) {
|
func (*RawNebulaCertificate) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a142e29cbef9b1cf, []int{0}
|
return file_cert_proto_rawDescGZIP(), []int{0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificate) XXX_Unmarshal(b []byte) error {
|
func (x *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
|
||||||
return xxx_messageInfo_RawNebulaCertificate.Unmarshal(m, b)
|
if x != nil {
|
||||||
}
|
return x.Details
|
||||||
func (m *RawNebulaCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_RawNebulaCertificate.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *RawNebulaCertificate) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_RawNebulaCertificate.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *RawNebulaCertificate) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_RawNebulaCertificate.Size(m)
|
|
||||||
}
|
|
||||||
func (m *RawNebulaCertificate) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_RawNebulaCertificate.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_RawNebulaCertificate proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
|
|
||||||
if m != nil {
|
|
||||||
return m.Details
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificate) GetSignature() []byte {
|
func (x *RawNebulaCertificate) GetSignature() []byte {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.Signature
|
return x.Signature
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type RawNebulaCertificateDetails struct {
|
type RawNebulaCertificateDetails struct {
|
||||||
Name string `protobuf:"bytes,1,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
|
||||||
// Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask
|
// Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask
|
||||||
Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,json=ips,proto3" json:"Ips,omitempty"`
|
Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,proto3" json:"Ips,omitempty"`
|
||||||
Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,json=subnets,proto3" json:"Subnets,omitempty"`
|
Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,proto3" json:"Subnets,omitempty"`
|
||||||
Groups []string `protobuf:"bytes,4,rep,name=Groups,json=groups,proto3" json:"Groups,omitempty"`
|
Groups []string `protobuf:"bytes,4,rep,name=Groups,proto3" json:"Groups,omitempty"`
|
||||||
NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,json=notBefore,proto3" json:"NotBefore,omitempty"`
|
NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"`
|
||||||
NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,json=notAfter,proto3" json:"NotAfter,omitempty"`
|
NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"`
|
||||||
PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,json=publicKey,proto3" json:"PublicKey,omitempty"`
|
PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"`
|
||||||
IsCA bool `protobuf:"varint,8,opt,name=IsCA,json=isCA,proto3" json:"IsCA,omitempty"`
|
IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"`
|
||||||
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
|
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed
|
||||||
Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,json=issuer,proto3" json:"Issuer,omitempty"`
|
Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
}
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
func (x *RawNebulaCertificateDetails) Reset() {
|
||||||
|
*x = RawNebulaCertificateDetails{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cert_proto_msgTypes[1]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificateDetails) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) Reset() { *m = RawNebulaCertificateDetails{} }
|
|
||||||
func (m *RawNebulaCertificateDetails) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*RawNebulaCertificateDetails) ProtoMessage() {}
|
func (*RawNebulaCertificateDetails) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cert_proto_msgTypes[1]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RawNebulaCertificateDetails.ProtoReflect.Descriptor instead.
|
||||||
func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) {
|
func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a142e29cbef9b1cf, []int{1}
|
return file_cert_proto_rawDescGZIP(), []int{1}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) XXX_Unmarshal(b []byte) error {
|
func (x *RawNebulaCertificateDetails) GetName() string {
|
||||||
return xxx_messageInfo_RawNebulaCertificateDetails.Unmarshal(m, b)
|
if x != nil {
|
||||||
}
|
return x.Name
|
||||||
func (m *RawNebulaCertificateDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_RawNebulaCertificateDetails.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *RawNebulaCertificateDetails) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_RawNebulaCertificateDetails.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *RawNebulaCertificateDetails) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_RawNebulaCertificateDetails.Size(m)
|
|
||||||
}
|
|
||||||
func (m *RawNebulaCertificateDetails) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_RawNebulaCertificateDetails.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_RawNebulaCertificateDetails proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetName() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Name
|
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetIps() []uint32 {
|
func (x *RawNebulaCertificateDetails) GetIps() []uint32 {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.Ips
|
return x.Ips
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetSubnets() []uint32 {
|
func (x *RawNebulaCertificateDetails) GetSubnets() []uint32 {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.Subnets
|
return x.Subnets
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetGroups() []string {
|
func (x *RawNebulaCertificateDetails) GetGroups() []string {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.Groups
|
return x.Groups
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetNotBefore() int64 {
|
func (x *RawNebulaCertificateDetails) GetNotBefore() int64 {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.NotBefore
|
return x.NotBefore
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetNotAfter() int64 {
|
func (x *RawNebulaCertificateDetails) GetNotAfter() int64 {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.NotAfter
|
return x.NotAfter
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetPublicKey() []byte {
|
func (x *RawNebulaCertificateDetails) GetPublicKey() []byte {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.PublicKey
|
return x.PublicKey
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetIsCA() bool {
|
func (x *RawNebulaCertificateDetails) GetIsCA() bool {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.IsCA
|
return x.IsCA
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *RawNebulaCertificateDetails) GetIssuer() []byte {
|
func (x *RawNebulaCertificateDetails) GetIssuer() []byte {
|
||||||
if m != nil {
|
if x != nil {
|
||||||
return m.Issuer
|
return x.Issuer
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
var File_cert_proto protoreflect.FileDescriptor
|
||||||
proto.RegisterType((*RawNebulaCertificate)(nil), "cert.RawNebulaCertificate")
|
|
||||||
proto.RegisterType((*RawNebulaCertificateDetails)(nil), "cert.RawNebulaCertificateDetails")
|
var file_cert_proto_rawDesc = []byte{
|
||||||
|
0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x65,
|
||||||
|
0x72, 0x74, 0x22, 0x71, 0x0a, 0x14, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43,
|
||||||
|
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x44, 0x65,
|
||||||
|
0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
|
||||||
|
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74,
|
||||||
|
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
|
||||||
|
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
|
||||||
|
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
|
||||||
|
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
||||||
|
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
|
||||||
|
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
||||||
|
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
|
||||||
|
0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x49, 0x70, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x53,
|
||||||
|
0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x53, 0x75,
|
||||||
|
0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18,
|
||||||
|
0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x0a,
|
||||||
|
0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
|
||||||
|
0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4e,
|
||||||
|
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x4e,
|
||||||
|
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69,
|
||||||
|
0x63, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x75, 0x62, 0x6c,
|
||||||
|
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
|
||||||
|
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
|
||||||
|
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
|
||||||
|
0x72, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||||
|
0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71, 0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63,
|
||||||
|
0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("cert.proto", fileDescriptor_a142e29cbef9b1cf) }
|
var (
|
||||||
|
file_cert_proto_rawDescOnce sync.Once
|
||||||
|
file_cert_proto_rawDescData = file_cert_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
var fileDescriptor_a142e29cbef9b1cf = []byte{
|
func file_cert_proto_rawDescGZIP() []byte {
|
||||||
// 279 bytes of a gzipped FileDescriptorProto
|
file_cert_proto_rawDescOnce.Do(func() {
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xcf, 0x4a, 0xf4, 0x30,
|
file_cert_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_proto_rawDescData)
|
||||||
0x14, 0xc5, 0xc9, 0xa4, 0x5f, 0xdb, 0xe4, 0x53, 0x90, 0x20, 0x12, 0xd4, 0x45, 0x9c, 0x55, 0x56,
|
})
|
||||||
0xb3, 0xd0, 0xa5, 0xab, 0x71, 0x04, 0x29, 0x42, 0x91, 0xcc, 0x13, 0xa4, 0xf5, 0x76, 0x08, 0x74,
|
return file_cert_proto_rawDescData
|
||||||
0x9a, 0x9a, 0x3f, 0x88, 0x8f, 0xee, 0x4e, 0x9a, 0x4e, 0x77, 0xe2, 0xee, 0x9e, 0x5f, 0xce, 0x49,
|
}
|
||||||
0x4e, 0x2e, 0xa5, 0x2d, 0xb8, 0xb0, 0x19, 0x9d, 0x0d, 0x96, 0x65, 0xd3, 0xbc, 0xfe, 0xa0, 0x97,
|
|
||||||
0x4a, 0x7f, 0xd6, 0xd0, 0xc4, 0x5e, 0xef, 0xc0, 0x05, 0xd3, 0x99, 0x56, 0x07, 0x60, 0x8f, 0xb4,
|
var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||||
0x78, 0x86, 0xa0, 0x4d, 0xef, 0x39, 0x12, 0x48, 0xfe, 0xbf, 0xbf, 0xdb, 0xa4, 0xec, 0x6f, 0xe6,
|
var file_cert_proto_goTypes = []interface{}{
|
||||||
0x93, 0x51, 0x15, 0xef, 0xf3, 0xc0, 0x6e, 0x29, 0xd9, 0x9b, 0xc3, 0xa0, 0x43, 0x74, 0xc0, 0x57,
|
(*RawNebulaCertificate)(nil), // 0: cert.RawNebulaCertificate
|
||||||
0x02, 0xc9, 0x33, 0x45, 0xfc, 0x02, 0xd6, 0xdf, 0x88, 0xde, 0xfc, 0x71, 0x0d, 0x63, 0x34, 0xab,
|
(*RawNebulaCertificateDetails)(nil), // 1: cert.RawNebulaCertificateDetails
|
||||||
0xf5, 0x11, 0xd2, 0xbb, 0x44, 0x65, 0x83, 0x3e, 0x02, 0xbb, 0xa0, 0xb8, 0x1a, 0x3d, 0x5f, 0x09,
|
}
|
||||||
0x2c, 0xcf, 0x15, 0x36, 0xa3, 0x67, 0x9c, 0x16, 0xfb, 0xd8, 0x0c, 0x10, 0x3c, 0xc7, 0x89, 0x16,
|
var file_cert_proto_depIdxs = []int32{
|
||||||
0x7e, 0x96, 0xec, 0x8a, 0xe6, 0x2f, 0xce, 0xc6, 0xd1, 0xf3, 0x4c, 0x60, 0x49, 0x54, 0x7e, 0x48,
|
1, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
|
||||||
0x6a, 0x6a, 0x55, 0xdb, 0xf0, 0x04, 0x9d, 0x75, 0xc0, 0xff, 0x09, 0x24, 0xb1, 0x22, 0xc3, 0x02,
|
1, // [1:1] is the sub-list for method output_type
|
||||||
0xd8, 0x35, 0x2d, 0x6b, 0x1b, 0xb6, 0x5d, 0x00, 0xc7, 0xf3, 0x74, 0x58, 0x0e, 0x27, 0x3d, 0x25,
|
1, // [1:1] is the sub-list for method input_type
|
||||||
0xdf, 0x62, 0xd3, 0x9b, 0xf6, 0x15, 0xbe, 0x78, 0x31, 0xff, 0x67, 0x5c, 0xc0, 0xd4, 0xb7, 0xf2,
|
1, // [1:1] is the sub-list for extension type_name
|
||||||
0xbb, 0x2d, 0x2f, 0x05, 0x92, 0xa5, 0xca, 0x8c, 0xdf, 0x6d, 0xa7, 0x0e, 0x95, 0xf7, 0x11, 0x1c,
|
1, // [1:1] is the sub-list for extension extendee
|
||||||
0x27, 0xc9, 0x9e, 0x9b, 0xa4, 0x9a, 0x3c, 0xed, 0xfe, 0xe1, 0x27, 0x00, 0x00, 0xff, 0xff, 0x2c,
|
0, // [0:1] is the sub-list for field type_name
|
||||||
0xe3, 0x08, 0x37, 0x89, 0x01, 0x00, 0x00,
|
}
|
||||||
|
|
||||||
|
func init() { file_cert_proto_init() }
|
||||||
|
func file_cert_proto_init() {
|
||||||
|
if File_cert_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_cert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaCertificate); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_cert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RawNebulaCertificateDetails); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_cert_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 2,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_cert_proto_goTypes,
|
||||||
|
DependencyIndexes: file_cert_proto_depIdxs,
|
||||||
|
MessageInfos: file_cert_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_cert_proto = out.File
|
||||||
|
file_cert_proto_rawDesc = nil
|
||||||
|
file_cert_proto_goTypes = nil
|
||||||
|
file_cert_proto_depIdxs = nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
syntax = "proto3";
|
syntax = "proto3";
|
||||||
package cert;
|
package cert;
|
||||||
|
|
||||||
|
option go_package = "github.com/slackhq/nebula/cert";
|
||||||
|
|
||||||
//import "google/protobuf/timestamp.proto";
|
//import "google/protobuf/timestamp.proto";
|
||||||
|
|
||||||
message RawNebulaCertificate {
|
message RawNebulaCertificate {
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"golang.org/x/crypto/curve25519"
|
"golang.org/x/crypto/curve25519"
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
@@ -172,13 +173,13 @@ func TestNebulaCertificate_Verify(t *testing.T) {
|
|||||||
|
|
||||||
f, err := c.Sha256Sum()
|
f, err := c.Sha256Sum()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
caPool.BlacklistFingerprint(f)
|
caPool.BlocklistFingerprint(f)
|
||||||
|
|
||||||
v, err := c.Verify(time.Now(), caPool)
|
v, err := c.Verify(time.Now(), caPool)
|
||||||
assert.False(t, v)
|
assert.False(t, v)
|
||||||
assert.EqualError(t, err, "certificate has been blacklisted")
|
assert.EqualError(t, err, "certificate has been blocked")
|
||||||
|
|
||||||
caPool.ResetCertBlacklist()
|
caPool.ResetCertBlocklist()
|
||||||
v, err = c.Verify(time.Now(), caPool)
|
v, err = c.Verify(time.Now(), caPool)
|
||||||
assert.True(t, v)
|
assert.True(t, v)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -374,9 +375,16 @@ func TestNebulaCertificate_Verify_Subnets(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNebulaVerifyPrivateKey(t *testing.T) {
|
func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) {
|
||||||
ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
err = ca.VerifyPrivateKey(caKey)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
_, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
err = ca.VerifyPrivateKey(caKey2)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
err = c.VerifyPrivateKey(priv)
|
err = c.VerifyPrivateKey(priv)
|
||||||
@@ -446,6 +454,255 @@ BVG+oJpAoqokUBbI4U0N8CSfpUABEkB/Pm5A2xyH/nc8mg/wvGUWG3pZ7nHzaDMf
|
|||||||
assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
|
assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func appendByteSlices(b ...[]byte) []byte {
|
||||||
|
retSlice := []byte{}
|
||||||
|
for _, v := range b {
|
||||||
|
retSlice = append(retSlice, v...)
|
||||||
|
}
|
||||||
|
return retSlice
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmrshalCertPEM(t *testing.T) {
|
||||||
|
goodCert := []byte(`
|
||||||
|
# A good cert
|
||||||
|
-----BEGIN NEBULA CERTIFICATE-----
|
||||||
|
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
|
||||||
|
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
|
||||||
|
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
||||||
|
-----END NEBULA CERTIFICATE-----
|
||||||
|
`)
|
||||||
|
badBanner := []byte(`# A bad banner
|
||||||
|
-----BEGIN NOT A NEBULA CERTIFICATE-----
|
||||||
|
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
|
||||||
|
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
|
||||||
|
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
||||||
|
-----END NOT A NEBULA CERTIFICATE-----
|
||||||
|
`)
|
||||||
|
invalidPem := []byte(`# Not a valid PEM format
|
||||||
|
-BEGIN NEBULA CERTIFICATE-----
|
||||||
|
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
|
||||||
|
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
|
||||||
|
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
||||||
|
-END NEBULA CERTIFICATE----`)
|
||||||
|
|
||||||
|
certBundle := appendByteSlices(goodCert, badBanner, invalidPem)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
cert, rest, err := UnmarshalNebulaCertificateFromPEM(certBundle)
|
||||||
|
assert.NotNil(t, cert)
|
||||||
|
assert.Equal(t, rest, append(badBanner, invalidPem...))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// Fail due to invalid banner.
|
||||||
|
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest)
|
||||||
|
assert.Nil(t, cert)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "bytes did not contain a proper nebula certificate banner")
|
||||||
|
|
||||||
|
// Fail due to ivalid PEM format, because
|
||||||
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
|
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest)
|
||||||
|
assert.Nil(t, cert)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalEd25519PrivateKey(t *testing.T) {
|
||||||
|
privKey := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
|
-----END NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
shortKey := []byte(`# A short key
|
||||||
|
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||||
|
-----END NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
invalidBanner := []byte(`# Invalid banner
|
||||||
|
-----BEGIN NOT A NEBULA PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
|
-----END NOT A NEBULA PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
invalidPem := []byte(`# Not a valid PEM format
|
||||||
|
-BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
|
-END NEBULA ED25519 PRIVATE KEY-----`)
|
||||||
|
|
||||||
|
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, err := UnmarshalEd25519PrivateKey(keyBundle)
|
||||||
|
assert.Len(t, k, 64)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// Fail due to short key
|
||||||
|
k, rest, err = UnmarshalEd25519PrivateKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
|
assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
|
||||||
|
|
||||||
|
// Fail due to invalid banner
|
||||||
|
k, rest, err = UnmarshalEd25519PrivateKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 private key banner")
|
||||||
|
|
||||||
|
// Fail due to ivalid PEM format, because
|
||||||
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
|
k, rest, err = UnmarshalEd25519PrivateKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalX25519PrivateKey(t *testing.T) {
|
||||||
|
privKey := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NEBULA X25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
shortKey := []byte(`# A short key
|
||||||
|
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
|
-----END NEBULA X25519 PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
invalidBanner := []byte(`# Invalid banner
|
||||||
|
-----BEGIN NOT A NEBULA PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NOT A NEBULA PRIVATE KEY-----
|
||||||
|
`)
|
||||||
|
invalidPem := []byte(`# Not a valid PEM format
|
||||||
|
-BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-END NEBULA X25519 PRIVATE KEY-----`)
|
||||||
|
|
||||||
|
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, err := UnmarshalX25519PrivateKey(keyBundle)
|
||||||
|
assert.Len(t, k, 32)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
// Fail due to short key
|
||||||
|
k, rest, err = UnmarshalX25519PrivateKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
|
assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 private key")
|
||||||
|
|
||||||
|
// Fail due to invalid banner
|
||||||
|
k, rest, err = UnmarshalX25519PrivateKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 private key banner")
|
||||||
|
|
||||||
|
// Fail due to ivalid PEM format, because
|
||||||
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
|
k, rest, err = UnmarshalX25519PrivateKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalEd25519PublicKey(t *testing.T) {
|
||||||
|
pubKey := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NEBULA ED25519 PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
shortKey := []byte(`# A short key
|
||||||
|
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
|
-----END NEBULA ED25519 PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
invalidBanner := []byte(`# Invalid banner
|
||||||
|
-----BEGIN NOT A NEBULA PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NOT A NEBULA PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
invalidPem := []byte(`# Not a valid PEM format
|
||||||
|
-BEGIN NEBULA ED25519 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-END NEBULA ED25519 PUBLIC KEY-----`)
|
||||||
|
|
||||||
|
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, err := UnmarshalEd25519PublicKey(keyBundle)
|
||||||
|
assert.Equal(t, len(k), 32)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
|
||||||
|
// Fail due to short key
|
||||||
|
k, rest, err = UnmarshalEd25519PublicKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
|
assert.EqualError(t, err, "key was not 32 bytes, is invalid ed25519 public key")
|
||||||
|
|
||||||
|
// Fail due to invalid banner
|
||||||
|
k, rest, err = UnmarshalEd25519PublicKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 public key banner")
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
|
||||||
|
// Fail due to ivalid PEM format, because
|
||||||
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
|
k, rest, err = UnmarshalEd25519PublicKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalX25519PublicKey(t *testing.T) {
|
||||||
|
pubKey := []byte(`# A good key
|
||||||
|
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NEBULA X25519 PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
shortKey := []byte(`# A short key
|
||||||
|
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||||
|
-----END NEBULA X25519 PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
invalidBanner := []byte(`# Invalid banner
|
||||||
|
-----BEGIN NOT A NEBULA PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-----END NOT A NEBULA PUBLIC KEY-----
|
||||||
|
`)
|
||||||
|
invalidPem := []byte(`# Not a valid PEM format
|
||||||
|
-BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||||
|
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||||
|
-END NEBULA X25519 PUBLIC KEY-----`)
|
||||||
|
|
||||||
|
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
|
||||||
|
|
||||||
|
// Success test case
|
||||||
|
k, rest, err := UnmarshalX25519PublicKey(keyBundle)
|
||||||
|
assert.Equal(t, len(k), 32)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||||
|
|
||||||
|
// Fail due to short key
|
||||||
|
k, rest, err = UnmarshalX25519PublicKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||||
|
assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 public key")
|
||||||
|
|
||||||
|
// Fail due to invalid banner
|
||||||
|
k, rest, err = UnmarshalX25519PublicKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 public key banner")
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
|
||||||
|
// Fail due to ivalid PEM format, because
|
||||||
|
// it's missing the requisite pre-encapsulation boundary.
|
||||||
|
k, rest, err = UnmarshalX25519PublicKey(rest)
|
||||||
|
assert.Nil(t, k)
|
||||||
|
assert.Equal(t, rest, invalidPem)
|
||||||
|
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||||
|
}
|
||||||
|
|
||||||
// Ensure that upgrading the protobuf library does not change how certificates
|
// Ensure that upgrading the protobuf library does not change how certificates
|
||||||
// are marshalled, since this would break signature verification
|
// are marshalled, since this would break signature verification
|
||||||
func TestMarshalingNebulaCertificateConsistency(t *testing.T) {
|
func TestMarshalingNebulaCertificateConsistency(t *testing.T) {
|
||||||
@@ -487,6 +744,24 @@ func TestMarshalingNebulaCertificateConsistency(t *testing.T) {
|
|||||||
assert.Equal(t, "0a0774657374696e67121b8182845080feffff0f828284508080fcff0f8382845080fe83f80f1a1b8182844880fe83f80f8282844880feffff0f838284488080fcff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328f0e0e7d70430a08681c4053a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b))
|
assert.Equal(t, "0a0774657374696e67121b8182845080feffff0f828284508080fcff0f8382845080fe83f80f1a1b8182844880fe83f80f8282844880feffff0f838284488080fcff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328f0e0e7d70430a08681c4053a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNebulaCertificate_Copy(t *testing.T) {
|
||||||
|
ca, _, caKey, err := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
cc := c.Copy()
|
||||||
|
|
||||||
|
util.AssertDeepCopyEqual(t, c, cc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalNebulaCertificate(t *testing.T) {
|
||||||
|
// Test that we don't panic with an invalid certificate (#332)
|
||||||
|
data := []byte("\x98\x00\x00")
|
||||||
|
_, err := UnmarshalNebulaCertificate(data)
|
||||||
|
assert.EqualError(t, err, "encoded Details was nil")
|
||||||
|
}
|
||||||
|
|
||||||
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
|
||||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||||
if before.IsZero() {
|
if before.IsZero() {
|
||||||
@@ -499,10 +774,11 @@ func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []
|
|||||||
nc := &NebulaCertificate{
|
nc := &NebulaCertificate{
|
||||||
Details: NebulaCertificateDetails{
|
Details: NebulaCertificateDetails{
|
||||||
Name: "test ca",
|
Name: "test ca",
|
||||||
NotBefore: before,
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
NotAfter: after,
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
PublicKey: pub,
|
PublicKey: pub,
|
||||||
IsCA: true,
|
IsCA: true,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -544,17 +820,17 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
|
|
||||||
if len(ips) == 0 {
|
if len(ips) == 0 {
|
||||||
ips = []*net.IPNet{
|
ips = []*net.IPNet{
|
||||||
{IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
|
{IP: net.ParseIP("10.1.1.1").To4(), Mask: net.IPMask(net.ParseIP("255.255.255.0").To4())},
|
||||||
{IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
|
{IP: net.ParseIP("10.1.1.2").To4(), Mask: net.IPMask(net.ParseIP("255.255.0.0").To4())},
|
||||||
{IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
|
{IP: net.ParseIP("10.1.1.3").To4(), Mask: net.IPMask(net.ParseIP("255.0.255.0").To4())},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(subnets) == 0 {
|
if len(subnets) == 0 {
|
||||||
subnets = []*net.IPNet{
|
subnets = []*net.IPNet{
|
||||||
{IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
|
{IP: net.ParseIP("9.1.1.1").To4(), Mask: net.IPMask(net.ParseIP("255.0.255.0").To4())},
|
||||||
{IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
|
{IP: net.ParseIP("9.1.1.2").To4(), Mask: net.IPMask(net.ParseIP("255.255.255.0").To4())},
|
||||||
{IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
|
{IP: net.ParseIP("9.1.1.3").To4(), Mask: net.IPMask(net.ParseIP("255.255.0.0").To4())},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -566,11 +842,12 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
Ips: ips,
|
Ips: ips,
|
||||||
Subnets: subnets,
|
Subnets: subnets,
|
||||||
Groups: groups,
|
Groups: groups,
|
||||||
NotBefore: before,
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
NotAfter: after,
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
PublicKey: pub,
|
PublicKey: pub,
|
||||||
IsCA: false,
|
IsCA: false,
|
||||||
Issuer: issuer,
|
Issuer: issuer,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -583,10 +860,15 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
|
|||||||
}
|
}
|
||||||
|
|
||||||
func x25519Keypair() ([]byte, []byte) {
|
func x25519Keypair() ([]byte, []byte) {
|
||||||
var pubkey, privkey [32]byte
|
privkey := make([]byte, 32)
|
||||||
if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
curve25519.ScalarBaseMult(&pubkey, &privkey)
|
|
||||||
return pubkey[:], privkey[:]
|
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubkey, privkey
|
||||||
}
|
}
|
||||||
|
|||||||
10
cidr/parse.go
Normal file
10
cidr/parse.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
package cidr
|
||||||
|
|
||||||
|
import "net"
|
||||||
|
|
||||||
|
// Parse is a convenience function that returns only the IPNet
|
||||||
|
// This function ignores errors since it is primarily a test helper, the result could be nil
|
||||||
|
func Parse(s string) *net.IPNet {
|
||||||
|
_, c, _ := net.ParseCIDR(s)
|
||||||
|
return c
|
||||||
|
}
|
||||||
@@ -1,39 +1,39 @@
|
|||||||
package nebula
|
package cidr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CIDRNode struct {
|
type Node struct {
|
||||||
left *CIDRNode
|
left *Node
|
||||||
right *CIDRNode
|
right *Node
|
||||||
parent *CIDRNode
|
parent *Node
|
||||||
value interface{}
|
value interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
type CIDRTree struct {
|
type Tree4 struct {
|
||||||
root *CIDRNode
|
root *Node
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
startbit = uint32(0x80000000)
|
startbit = iputil.VpnIp(0x80000000)
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewCIDRTree() *CIDRTree {
|
func NewTree4() *Tree4 {
|
||||||
tree := new(CIDRTree)
|
tree := new(Tree4)
|
||||||
tree.root = &CIDRNode{}
|
tree.root = &Node{}
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) {
|
func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
next := tree.root
|
next := tree.root
|
||||||
|
|
||||||
ip := ip2int(cidr.IP)
|
ip := iputil.Ip2VpnIp(cidr.IP)
|
||||||
mask := ip2int(cidr.Mask)
|
mask := iputil.Ip2VpnIp(cidr.Mask)
|
||||||
|
|
||||||
// Find our last ancestor in the tree
|
// Find our last ancestor in the tree
|
||||||
for bit&mask != 0 {
|
for bit&mask != 0 {
|
||||||
@@ -59,7 +59,7 @@ func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) {
|
|||||||
|
|
||||||
// Build up the rest of the tree we don't already have
|
// Build up the rest of the tree we don't already have
|
||||||
for bit&mask != 0 {
|
for bit&mask != 0 {
|
||||||
next = &CIDRNode{}
|
next = &Node{}
|
||||||
next.parent = node
|
next.parent = node
|
||||||
|
|
||||||
if ip&bit != 0 {
|
if ip&bit != 0 {
|
||||||
@@ -76,8 +76,8 @@ func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) {
|
|||||||
node.value = val
|
node.value = val
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finds the first match, which way be the least specific
|
// Finds the first match, which may be the least specific
|
||||||
func (tree *CIDRTree) Contains(ip uint32) (value interface{}) {
|
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
|
|
||||||
@@ -100,7 +100,7 @@ func (tree *CIDRTree) Contains(ip uint32) (value interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Finds the most specific match
|
// Finds the most specific match
|
||||||
func (tree *CIDRTree) MostSpecificContains(ip uint32) (value interface{}) {
|
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
|
|
||||||
@@ -116,14 +116,13 @@ func (tree *CIDRTree) MostSpecificContains(ip uint32) (value interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bit >>= 1
|
bit >>= 1
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finds the most specific match
|
// Finds the most specific match
|
||||||
func (tree *CIDRTree) Match(ip uint32) (value interface{}) {
|
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
|
||||||
bit := startbit
|
bit := startbit
|
||||||
node := tree.root
|
node := tree.root
|
||||||
lastNode := node
|
lastNode := node
|
||||||
@@ -144,27 +143,3 @@ func (tree *CIDRTree) Match(ip uint32) (value interface{}) {
|
|||||||
}
|
}
|
||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
// A helper type to avoid converting to IP when logging
|
|
||||||
type IntIp uint32
|
|
||||||
|
|
||||||
func (ip IntIp) String() string {
|
|
||||||
return fmt.Sprintf("%v", int2ip(uint32(ip)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ip IntIp) MarshalJSON() ([]byte, error) {
|
|
||||||
return []byte(fmt.Sprintf("\"%s\"", int2ip(uint32(ip)).String())), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ip2int(ip []byte) uint32 {
|
|
||||||
if len(ip) == 16 {
|
|
||||||
return binary.BigEndian.Uint32(ip[12:16])
|
|
||||||
}
|
|
||||||
return binary.BigEndian.Uint32(ip)
|
|
||||||
}
|
|
||||||
|
|
||||||
func int2ip(nn uint32) net.IP {
|
|
||||||
ip := make(net.IP, 4)
|
|
||||||
binary.BigEndian.PutUint32(ip, nn)
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
153
cidr/tree4_test.go
Normal file
153
cidr/tree4_test.go
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
package cidr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCIDRTree_Contains(t *testing.T) {
|
||||||
|
tree := NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
|
||||||
|
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
|
||||||
|
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
|
||||||
|
tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
|
||||||
|
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Result interface{}
|
||||||
|
IP string
|
||||||
|
}{
|
||||||
|
{"1", "1.0.0.0"},
|
||||||
|
{"1", "1.255.255.255"},
|
||||||
|
{"2", "2.1.0.0"},
|
||||||
|
{"2", "2.1.255.255"},
|
||||||
|
{"3", "3.1.1.0"},
|
||||||
|
{"3", "3.1.1.255"},
|
||||||
|
{"4a", "4.1.1.255"},
|
||||||
|
{"4a", "4.1.1.1"},
|
||||||
|
{"5", "240.0.0.0"},
|
||||||
|
{"5", "255.255.255.255"},
|
||||||
|
{nil, "239.0.0.0"},
|
||||||
|
{nil, "4.1.2.2"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
|
||||||
|
}
|
||||||
|
|
||||||
|
tree = NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
|
||||||
|
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
|
||||||
|
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDRTree_MostSpecificContains(t *testing.T) {
|
||||||
|
tree := NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
|
||||||
|
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
|
||||||
|
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
|
||||||
|
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Result interface{}
|
||||||
|
IP string
|
||||||
|
}{
|
||||||
|
{"1", "1.0.0.0"},
|
||||||
|
{"1", "1.255.255.255"},
|
||||||
|
{"2", "2.1.0.0"},
|
||||||
|
{"2", "2.1.255.255"},
|
||||||
|
{"3", "3.1.1.0"},
|
||||||
|
{"3", "3.1.1.255"},
|
||||||
|
{"4a", "4.1.1.255"},
|
||||||
|
{"4b", "4.1.1.2"},
|
||||||
|
{"4c", "4.1.1.1"},
|
||||||
|
{"5", "240.0.0.0"},
|
||||||
|
{"5", "255.255.255.255"},
|
||||||
|
{nil, "239.0.0.0"},
|
||||||
|
{nil, "4.1.2.2"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
|
||||||
|
}
|
||||||
|
|
||||||
|
tree = NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
|
||||||
|
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
|
||||||
|
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDRTree_Match(t *testing.T) {
|
||||||
|
tree := NewTree4()
|
||||||
|
tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.1/32"), "1b")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Result interface{}
|
||||||
|
IP string
|
||||||
|
}{
|
||||||
|
{"1a", "4.1.1.0"},
|
||||||
|
{"1b", "4.1.1.1"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
|
||||||
|
}
|
||||||
|
|
||||||
|
tree = NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
|
||||||
|
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
|
||||||
|
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCIDRTree_Contains(b *testing.B) {
|
||||||
|
tree := NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
|
||||||
|
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
|
||||||
|
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
|
||||||
|
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
|
||||||
|
|
||||||
|
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
|
||||||
|
b.Run("found", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
tree.Contains(ip)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
|
||||||
|
b.Run("not found", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
tree.Contains(ip)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCIDRTree_Match(b *testing.B) {
|
||||||
|
tree := NewTree4()
|
||||||
|
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
|
||||||
|
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
|
||||||
|
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
|
||||||
|
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
|
||||||
|
|
||||||
|
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
|
||||||
|
b.Run("found", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
tree.Match(ip)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
|
||||||
|
b.Run("not found", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
tree.Match(ip)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
185
cidr/tree6.go
Normal file
185
cidr/tree6.go
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
package cidr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
const startbit6 = uint64(1 << 63)
|
||||||
|
|
||||||
|
type Tree6 struct {
|
||||||
|
root4 *Node
|
||||||
|
root6 *Node
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTree6() *Tree6 {
|
||||||
|
tree := new(Tree6)
|
||||||
|
tree.root4 = &Node{}
|
||||||
|
tree.root6 = &Node{}
|
||||||
|
return tree
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
|
||||||
|
var node, next *Node
|
||||||
|
|
||||||
|
cidrIP, ipv4 := isIPV4(cidr.IP)
|
||||||
|
if ipv4 {
|
||||||
|
node = tree.root4
|
||||||
|
next = tree.root4
|
||||||
|
|
||||||
|
} else {
|
||||||
|
node = tree.root6
|
||||||
|
next = tree.root6
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(cidrIP); i += 4 {
|
||||||
|
ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
|
||||||
|
mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
|
||||||
|
bit := startbit
|
||||||
|
|
||||||
|
// Find our last ancestor in the tree
|
||||||
|
for bit&mask != 0 {
|
||||||
|
if ip&bit != 0 {
|
||||||
|
next = node.right
|
||||||
|
} else {
|
||||||
|
next = node.left
|
||||||
|
}
|
||||||
|
|
||||||
|
if next == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
bit = bit >> 1
|
||||||
|
node = next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build up the rest of the tree we don't already have
|
||||||
|
for bit&mask != 0 {
|
||||||
|
next = &Node{}
|
||||||
|
next.parent = node
|
||||||
|
|
||||||
|
if ip&bit != 0 {
|
||||||
|
node.right = next
|
||||||
|
} else {
|
||||||
|
node.left = next
|
||||||
|
}
|
||||||
|
|
||||||
|
bit >>= 1
|
||||||
|
node = next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final node marks our cidr, set the value
|
||||||
|
node.value = val
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds the most specific match
|
||||||
|
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
|
||||||
|
var node *Node
|
||||||
|
|
||||||
|
wholeIP, ipv4 := isIPV4(ip)
|
||||||
|
if ipv4 {
|
||||||
|
node = tree.root4
|
||||||
|
} else {
|
||||||
|
node = tree.root6
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(wholeIP); i += 4 {
|
||||||
|
ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
|
||||||
|
bit := startbit
|
||||||
|
|
||||||
|
for node != nil {
|
||||||
|
if node.value != nil {
|
||||||
|
value = node.value
|
||||||
|
}
|
||||||
|
|
||||||
|
if bit == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if ip&bit != 0 {
|
||||||
|
node = node.right
|
||||||
|
} else {
|
||||||
|
node = node.left
|
||||||
|
}
|
||||||
|
|
||||||
|
bit >>= 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
|
||||||
|
bit := startbit
|
||||||
|
node := tree.root4
|
||||||
|
|
||||||
|
for node != nil {
|
||||||
|
if node.value != nil {
|
||||||
|
value = node.value
|
||||||
|
}
|
||||||
|
|
||||||
|
if ip&bit != 0 {
|
||||||
|
node = node.right
|
||||||
|
} else {
|
||||||
|
node = node.left
|
||||||
|
}
|
||||||
|
|
||||||
|
bit >>= 1
|
||||||
|
}
|
||||||
|
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
|
||||||
|
ip := hi
|
||||||
|
node := tree.root6
|
||||||
|
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
bit := startbit6
|
||||||
|
|
||||||
|
for node != nil {
|
||||||
|
if node.value != nil {
|
||||||
|
value = node.value
|
||||||
|
}
|
||||||
|
|
||||||
|
if bit == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if ip&bit != 0 {
|
||||||
|
node = node.right
|
||||||
|
} else {
|
||||||
|
node = node.left
|
||||||
|
}
|
||||||
|
|
||||||
|
bit >>= 1
|
||||||
|
}
|
||||||
|
|
||||||
|
ip = lo
|
||||||
|
}
|
||||||
|
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func isIPV4(ip net.IP) (net.IP, bool) {
|
||||||
|
if len(ip) == net.IPv4len {
|
||||||
|
return ip, true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
|
||||||
|
return ip[12:16], true
|
||||||
|
}
|
||||||
|
|
||||||
|
return ip, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isZeros(p net.IP) bool {
|
||||||
|
for i := 0; i < len(p); i++ {
|
||||||
|
if p[i] != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
81
cidr/tree6_test.go
Normal file
81
cidr/tree6_test.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package cidr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
|
||||||
|
tree := NewTree6()
|
||||||
|
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
|
||||||
|
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
|
||||||
|
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
|
||||||
|
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
|
||||||
|
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
|
||||||
|
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
|
||||||
|
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
|
||||||
|
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Result interface{}
|
||||||
|
IP string
|
||||||
|
}{
|
||||||
|
{"1", "1.0.0.0"},
|
||||||
|
{"1", "1.255.255.255"},
|
||||||
|
{"2", "2.1.0.0"},
|
||||||
|
{"2", "2.1.255.255"},
|
||||||
|
{"3", "3.1.1.0"},
|
||||||
|
{"3", "3.1.1.255"},
|
||||||
|
{"4a", "4.1.1.255"},
|
||||||
|
{"4b", "4.1.1.2"},
|
||||||
|
{"4c", "4.1.1.1"},
|
||||||
|
{"5", "240.0.0.0"},
|
||||||
|
{"5", "255.255.255.255"},
|
||||||
|
{"6a", "1:2:0:4:1:1:1:1"},
|
||||||
|
{"6b", "1:2:0:4:5:1:1:1"},
|
||||||
|
{"6c", "1:2:0:4:5:0:0:0"},
|
||||||
|
{nil, "239.0.0.0"},
|
||||||
|
{nil, "4.1.2.2"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
|
||||||
|
}
|
||||||
|
|
||||||
|
tree = NewTree6()
|
||||||
|
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
|
||||||
|
tree.AddCIDR(Parse("::/0"), "cool6")
|
||||||
|
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
|
||||||
|
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
|
||||||
|
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
|
||||||
|
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
|
||||||
|
tree := NewTree6()
|
||||||
|
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
|
||||||
|
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
|
||||||
|
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Result interface{}
|
||||||
|
IP string
|
||||||
|
}{
|
||||||
|
{"6a", "1:2:0:4:1:1:1:1"},
|
||||||
|
{"6b", "1:2:0:4:5:1:1:1"},
|
||||||
|
{"6c", "1:2:0:4:5:0:0:0"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
ip := net.ParseIP(tt.IP)
|
||||||
|
hi := binary.BigEndian.Uint64(ip[:8])
|
||||||
|
lo := binary.BigEndian.Uint64(ip[8:])
|
||||||
|
|
||||||
|
assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,157 +0,0 @@
|
|||||||
package nebula
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCIDRTree_Contains(t *testing.T) {
|
|
||||||
tree := NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.0.0.0/8"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("2.1.0.0/16"), "2")
|
|
||||||
tree.AddCIDR(getCIDR("3.1.1.0/24"), "3")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.0/24"), "4a")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.1/32"), "4b")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.2.1/32"), "4c")
|
|
||||||
tree.AddCIDR(getCIDR("254.0.0.0/4"), "5")
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
Result interface{}
|
|
||||||
IP string
|
|
||||||
}{
|
|
||||||
{"1", "1.0.0.0"},
|
|
||||||
{"1", "1.255.255.255"},
|
|
||||||
{"2", "2.1.0.0"},
|
|
||||||
{"2", "2.1.255.255"},
|
|
||||||
{"3", "3.1.1.0"},
|
|
||||||
{"3", "3.1.1.255"},
|
|
||||||
{"4a", "4.1.1.255"},
|
|
||||||
{"4a", "4.1.1.1"},
|
|
||||||
{"5", "240.0.0.0"},
|
|
||||||
{"5", "255.255.255.255"},
|
|
||||||
{nil, "239.0.0.0"},
|
|
||||||
{nil, "4.1.2.2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
assert.Equal(t, tt.Result, tree.Contains(ip2int(net.ParseIP(tt.IP))))
|
|
||||||
}
|
|
||||||
|
|
||||||
tree = NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
|
|
||||||
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("0.0.0.0"))))
|
|
||||||
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("255.255.255.255"))))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCIDRTree_MostSpecificContains(t *testing.T) {
|
|
||||||
tree := NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.0.0.0/8"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("2.1.0.0/16"), "2")
|
|
||||||
tree.AddCIDR(getCIDR("3.1.1.0/24"), "3")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.0/24"), "4a")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.0/30"), "4b")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.1/32"), "4c")
|
|
||||||
tree.AddCIDR(getCIDR("254.0.0.0/4"), "5")
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
Result interface{}
|
|
||||||
IP string
|
|
||||||
}{
|
|
||||||
{"1", "1.0.0.0"},
|
|
||||||
{"1", "1.255.255.255"},
|
|
||||||
{"2", "2.1.0.0"},
|
|
||||||
{"2", "2.1.255.255"},
|
|
||||||
{"3", "3.1.1.0"},
|
|
||||||
{"3", "3.1.1.255"},
|
|
||||||
{"4a", "4.1.1.255"},
|
|
||||||
{"4b", "4.1.1.2"},
|
|
||||||
{"4c", "4.1.1.1"},
|
|
||||||
{"5", "240.0.0.0"},
|
|
||||||
{"5", "255.255.255.255"},
|
|
||||||
{nil, "239.0.0.0"},
|
|
||||||
{nil, "4.1.2.2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
assert.Equal(t, tt.Result, tree.MostSpecificContains(ip2int(net.ParseIP(tt.IP))))
|
|
||||||
}
|
|
||||||
|
|
||||||
tree = NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
|
|
||||||
assert.Equal(t, "cool", tree.MostSpecificContains(ip2int(net.ParseIP("0.0.0.0"))))
|
|
||||||
assert.Equal(t, "cool", tree.MostSpecificContains(ip2int(net.ParseIP("255.255.255.255"))))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCIDRTree_Match(t *testing.T) {
|
|
||||||
tree := NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.0/32"), "1a")
|
|
||||||
tree.AddCIDR(getCIDR("4.1.1.1/32"), "1b")
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
Result interface{}
|
|
||||||
IP string
|
|
||||||
}{
|
|
||||||
{"1a", "4.1.1.0"},
|
|
||||||
{"1b", "4.1.1.1"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
assert.Equal(t, tt.Result, tree.Match(ip2int(net.ParseIP(tt.IP))))
|
|
||||||
}
|
|
||||||
|
|
||||||
tree = NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
|
|
||||||
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("0.0.0.0"))))
|
|
||||||
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("255.255.255.255"))))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCIDRTree_Contains(b *testing.B) {
|
|
||||||
tree := NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.1.0.0/16"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("1.2.1.1/32"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("192.2.1.1/32"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("172.2.1.1/32"), "1")
|
|
||||||
|
|
||||||
ip := ip2int(net.ParseIP("1.2.1.1"))
|
|
||||||
b.Run("found", func(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
tree.Contains(ip)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
ip = ip2int(net.ParseIP("1.2.1.255"))
|
|
||||||
b.Run("not found", func(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
tree.Contains(ip)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCIDRTree_Match(b *testing.B) {
|
|
||||||
tree := NewCIDRTree()
|
|
||||||
tree.AddCIDR(getCIDR("1.1.0.0/16"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("1.2.1.1/32"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("192.2.1.1/32"), "1")
|
|
||||||
tree.AddCIDR(getCIDR("172.2.1.1/32"), "1")
|
|
||||||
|
|
||||||
ip := ip2int(net.ParseIP("1.2.1.1"))
|
|
||||||
b.Run("found", func(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
tree.Match(ip)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
ip = ip2int(net.ParseIP("1.2.1.255"))
|
|
||||||
b.Run("not found", func(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
tree.Match(ip)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func getCIDR(s string) *net.IPNet {
|
|
||||||
_, c, _ := net.ParseCIDR(s)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/skip2/go-qrcode"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
)
|
)
|
||||||
@@ -21,6 +22,7 @@ type caFlags struct {
|
|||||||
duration *time.Duration
|
duration *time.Duration
|
||||||
outKeyPath *string
|
outKeyPath *string
|
||||||
outCertPath *string
|
outCertPath *string
|
||||||
|
outQRPath *string
|
||||||
groups *string
|
groups *string
|
||||||
ips *string
|
ips *string
|
||||||
subnets *string
|
subnets *string
|
||||||
@@ -33,6 +35,7 @@ func newCaFlags() *caFlags {
|
|||||||
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
|
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
|
||||||
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
|
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
|
||||||
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
|
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
|
||||||
|
cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
||||||
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
||||||
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use")
|
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use")
|
||||||
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use")
|
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use")
|
||||||
@@ -146,6 +149,18 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("error while writing out-crt: %s", err)
|
return fmt.Errorf("error while writing out-crt: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if *cf.outQRPath != "" {
|
||||||
|
b, err = qrcode.Encode(string(b), qrcode.Medium, -5)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while generating qr code: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while writing out-qr: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package main
|
package main
|
||||||
@@ -37,6 +38,8 @@ func Test_caHelp(t *testing.T) {
|
|||||||
" \tOptional: path to write the certificate to (default \"ca.crt\")\n"+
|
" \tOptional: path to write the certificate to (default \"ca.crt\")\n"+
|
||||||
" -out-key string\n"+
|
" -out-key string\n"+
|
||||||
" \tOptional: path to write the private key to (default \"ca.key\")\n"+
|
" \tOptional: path to write the private key to (default \"ca.key\")\n"+
|
||||||
|
" -out-qr string\n"+
|
||||||
|
" \tOptional: output a qr code image (png) of the certificate\n"+
|
||||||
" -subnets string\n"+
|
" -subnets string\n"+
|
||||||
" \tOptional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use\n",
|
" \tOptional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use\n",
|
||||||
ob.String(),
|
ob.String(),
|
||||||
|
|||||||
@@ -3,10 +3,11 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
//TODO: all flag parsing continueOnError will print to stderr on its own currently
|
//TODO: all flag parsing continueOnError will print to stderr on its own currently
|
||||||
|
|||||||
@@ -4,16 +4,19 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/skip2/go-qrcode"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
)
|
)
|
||||||
|
|
||||||
type printFlags struct {
|
type printFlags struct {
|
||||||
set *flag.FlagSet
|
set *flag.FlagSet
|
||||||
json *bool
|
json *bool
|
||||||
|
outQRPath *string
|
||||||
path *string
|
path *string
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -21,6 +24,7 @@ func newPrintFlags() *printFlags {
|
|||||||
pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)}
|
pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)}
|
||||||
pf.set.Usage = func() {}
|
pf.set.Usage = func() {}
|
||||||
pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format")
|
pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format")
|
||||||
|
pf.outQRPath = pf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
||||||
pf.path = pf.set.String("path", "", "Required: path to the certificate")
|
pf.path = pf.set.String("path", "", "Required: path to the certificate")
|
||||||
|
|
||||||
return &pf
|
return &pf
|
||||||
@@ -43,6 +47,8 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var c *cert.NebulaCertificate
|
var c *cert.NebulaCertificate
|
||||||
|
var qrBytes []byte
|
||||||
|
part := 0
|
||||||
|
|
||||||
for {
|
for {
|
||||||
c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert)
|
c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert)
|
||||||
@@ -60,9 +66,31 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
out.Write([]byte("\n"))
|
out.Write([]byte("\n"))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if *pf.outQRPath != "" {
|
||||||
|
b, err := c.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while marshalling cert to PEM: %s", err)
|
||||||
|
}
|
||||||
|
qrBytes = append(qrBytes, b...)
|
||||||
|
}
|
||||||
|
|
||||||
if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" {
|
if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
part++
|
||||||
|
}
|
||||||
|
|
||||||
|
if *pf.outQRPath != "" {
|
||||||
|
b, err := qrcode.Encode(string(qrBytes), qrcode.Medium, -5)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while generating qr code: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while writing out-qr: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -2,12 +2,13 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_printSummary(t *testing.T) {
|
func Test_printSummary(t *testing.T) {
|
||||||
@@ -22,6 +23,8 @@ func Test_printHelp(t *testing.T) {
|
|||||||
"Usage of "+os.Args[0]+" print <flags>: prints details about a certificate\n"+
|
"Usage of "+os.Args[0]+" print <flags>: prints details about a certificate\n"+
|
||||||
" -json\n"+
|
" -json\n"+
|
||||||
" \tOptional: outputs certificates in json format\n"+
|
" \tOptional: outputs certificates in json format\n"+
|
||||||
|
" -out-qr string\n"+
|
||||||
|
" \tOptional: output a qr code image (png) of the certificate\n"+
|
||||||
" -path string\n"+
|
" -path string\n"+
|
||||||
" \tRequired: path to the certificate\n",
|
" \tRequired: path to the certificate\n",
|
||||||
ob.String(),
|
ob.String(),
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/skip2/go-qrcode"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
"golang.org/x/crypto/curve25519"
|
"golang.org/x/crypto/curve25519"
|
||||||
)
|
)
|
||||||
@@ -25,6 +26,7 @@ type signFlags struct {
|
|||||||
inPubPath *string
|
inPubPath *string
|
||||||
outKeyPath *string
|
outKeyPath *string
|
||||||
outCertPath *string
|
outCertPath *string
|
||||||
|
outQRPath *string
|
||||||
groups *string
|
groups *string
|
||||||
subnets *string
|
subnets *string
|
||||||
}
|
}
|
||||||
@@ -40,8 +42,9 @@ func newSignFlags() *signFlags {
|
|||||||
sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key")
|
sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key")
|
||||||
sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to")
|
sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to")
|
||||||
sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to")
|
sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to")
|
||||||
|
sf.outQRPath = sf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
||||||
sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups")
|
sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups")
|
||||||
sf.subnets = sf.set.String("subnets", "", "Optional: comma seperated list of subnet this cert can serve for")
|
sf.subnets = sf.set.String("subnets", "", "Optional: comma separated list of subnet this cert can serve for")
|
||||||
return &sf
|
return &sf
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -89,6 +92,10 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("error while parsing ca-crt: %s", err)
|
return fmt.Errorf("error while parsing ca-crt: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := caCert.VerifyPrivateKey(caKey); err != nil {
|
||||||
|
return fmt.Errorf("refusing to sign, root certificate does not match private key")
|
||||||
|
}
|
||||||
|
|
||||||
issuer, err := caCert.Sha256Sum()
|
issuer, err := caCert.Sha256Sum()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err)
|
return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err)
|
||||||
@@ -203,16 +210,33 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||||||
return fmt.Errorf("error while writing out-crt: %s", err)
|
return fmt.Errorf("error while writing out-crt: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if *sf.outQRPath != "" {
|
||||||
|
b, err = qrcode.Encode(string(b), qrcode.Medium, -5)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while generating qr code: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while writing out-qr: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func x25519Keypair() ([]byte, []byte) {
|
func x25519Keypair() ([]byte, []byte) {
|
||||||
var pubkey, privkey [32]byte
|
privkey := make([]byte, 32)
|
||||||
if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil {
|
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
curve25519.ScalarBaseMult(&pubkey, &privkey)
|
|
||||||
return pubkey[:], privkey[:]
|
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubkey, privkey
|
||||||
}
|
}
|
||||||
|
|
||||||
func signSummary() string {
|
func signSummary() string {
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package main
|
package main
|
||||||
@@ -45,8 +46,10 @@ func Test_signHelp(t *testing.T) {
|
|||||||
" \tOptional: path to write the certificate to\n"+
|
" \tOptional: path to write the certificate to\n"+
|
||||||
" -out-key string\n"+
|
" -out-key string\n"+
|
||||||
" \tOptional (if in-pub not set): path to write the private key to\n"+
|
" \tOptional (if in-pub not set): path to write the private key to\n"+
|
||||||
|
" -out-qr string\n"+
|
||||||
|
" \tOptional: output a qr code image (png) of the certificate\n"+
|
||||||
" -subnets string\n"+
|
" -subnets string\n"+
|
||||||
" \tOptional: comma seperated list of subnet this cert can serve for\n",
|
" \tOptional: comma separated list of subnet this cert can serve for\n",
|
||||||
ob.String(),
|
ob.String(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -165,6 +168,20 @@ func Test_signCert(t *testing.T) {
|
|||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
|
// mismatched ca key
|
||||||
|
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
|
||||||
|
caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(caKeyF2.Name())
|
||||||
|
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
|
||||||
|
|
||||||
|
ob.Reset()
|
||||||
|
eb.Reset()
|
||||||
|
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||||
|
assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate does not match private key")
|
||||||
|
assert.Empty(t, ob.String())
|
||||||
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
// failed key write
|
// failed key write
|
||||||
ob.Reset()
|
ob.Reset()
|
||||||
eb.Reset()
|
eb.Reset()
|
||||||
@@ -286,5 +303,4 @@ func Test_signCert(t *testing.T) {
|
|||||||
assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name())
|
assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name())
|
||||||
assert.Empty(t, ob.String())
|
assert.Empty(t, ob.String())
|
||||||
assert.Empty(t, eb.String())
|
assert.Empty(t, eb.String())
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,12 +3,13 @@ package main
|
|||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
)
|
)
|
||||||
|
|
||||||
type verifyFlags struct {
|
type verifyFlags struct {
|
||||||
|
|||||||
@@ -3,13 +3,14 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"golang.org/x/crypto/ed25519"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"golang.org/x/crypto/ed25519"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_verifySummary(t *testing.T) {
|
func Test_verifySummary(t *testing.T) {
|
||||||
|
|||||||
10
cmd/nebula-service/logs_generic.go
Normal file
10
cmd/nebula-service/logs_generic.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
//go:build !windows
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
func HookLogger(l *logrus.Logger) {
|
||||||
|
// Do nothing, let the logs flow to stdout/stderr
|
||||||
|
}
|
||||||
54
cmd/nebula-service/logs_windows.go
Normal file
54
cmd/nebula-service/logs_windows.go
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/kardianos/service"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HookLogger routes the logrus logs through the service logger so that they end up in the Windows Event Viewer
|
||||||
|
// logrus output will be discarded
|
||||||
|
func HookLogger(l *logrus.Logger) {
|
||||||
|
l.AddHook(newLogHook(logger))
|
||||||
|
l.SetOutput(ioutil.Discard)
|
||||||
|
}
|
||||||
|
|
||||||
|
type logHook struct {
|
||||||
|
sl service.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLogHook(sl service.Logger) *logHook {
|
||||||
|
return &logHook{sl: sl}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *logHook) Fire(entry *logrus.Entry) error {
|
||||||
|
line, err := entry.String()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch entry.Level {
|
||||||
|
case logrus.PanicLevel:
|
||||||
|
return h.sl.Error(line)
|
||||||
|
case logrus.FatalLevel:
|
||||||
|
return h.sl.Error(line)
|
||||||
|
case logrus.ErrorLevel:
|
||||||
|
return h.sl.Error(line)
|
||||||
|
case logrus.WarnLevel:
|
||||||
|
return h.sl.Warning(line)
|
||||||
|
case logrus.InfoLevel:
|
||||||
|
return h.sl.Info(line)
|
||||||
|
case logrus.DebugLevel:
|
||||||
|
return h.sl.Info(line)
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *logHook) Levels() []logrus.Level {
|
||||||
|
return logrus.AllLevels
|
||||||
|
}
|
||||||
@@ -5,7 +5,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula"
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A version string that can be set with
|
// A version string that can be set with
|
||||||
@@ -45,5 +47,31 @@ func main() {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
nebula.Main(*configPath, *configTest, Build)
|
l := logrus.New()
|
||||||
|
l.Out = os.Stdout
|
||||||
|
|
||||||
|
c := config.NewC(l)
|
||||||
|
err := c.Load(*configPath)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("failed to load config: %s", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
|
||||||
|
|
||||||
|
switch v := err.(type) {
|
||||||
|
case nebula.ContextualError:
|
||||||
|
v.Log(l)
|
||||||
|
os.Exit(1)
|
||||||
|
case error:
|
||||||
|
l.WithError(err).Error("Failed to start")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !*configTest {
|
||||||
|
ctrl.Start()
|
||||||
|
ctrl.ShutdownBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,44 +1,55 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/kardianos/service"
|
"github.com/kardianos/service"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula"
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
var logger service.Logger
|
var logger service.Logger
|
||||||
|
|
||||||
type program struct {
|
type program struct {
|
||||||
exit chan struct{}
|
|
||||||
configPath *string
|
configPath *string
|
||||||
configTest *bool
|
configTest *bool
|
||||||
build string
|
build string
|
||||||
|
control *nebula.Control
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *program) Start(s service.Service) error {
|
func (p *program) Start(s service.Service) error {
|
||||||
logger.Info("Nebula service starting.")
|
|
||||||
p.exit = make(chan struct{})
|
|
||||||
// Start should not block.
|
// Start should not block.
|
||||||
go p.run()
|
logger.Info("Nebula service starting.")
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *program) run() error {
|
l := logrus.New()
|
||||||
nebula.Main(*p.configPath, *p.configTest, Build)
|
HookLogger(l)
|
||||||
|
|
||||||
|
c := config.NewC(l)
|
||||||
|
err := c.Load(*p.configPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to load config: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.control, err = nebula.Main(c, *p.configTest, Build, l, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.control.Start()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *program) Stop(s service.Service) error {
|
func (p *program) Stop(s service.Service) error {
|
||||||
logger.Info("Nebula service stopping.")
|
logger.Info("Nebula service stopping.")
|
||||||
close(p.exit)
|
p.control.Stop()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
|
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
|
||||||
|
|
||||||
if *configPath == "" {
|
if *configPath == "" {
|
||||||
ex, err := os.Executable()
|
ex, err := os.Executable()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -60,6 +71,10 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
|
|||||||
build: build,
|
build: build,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Here are what the different loggers are doing:
|
||||||
|
// - `log` is the standard go log utility, meant to be used while the process is still attached to stdout/stderr
|
||||||
|
// - `logger` is the service log utility that may be attached to a special place depending on OS (Windows will have it attached to the event log)
|
||||||
|
// - above, in `Run` we create a `logrus.Logger` which is what nebula expects to use
|
||||||
s, err := service.New(prg, svcConfig)
|
s, err := service.New(prg, svcConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
@@ -75,6 +90,7 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
|
|||||||
for {
|
for {
|
||||||
err := <-errs
|
err := <-errs
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// Route any errors from the system logger to stdout as a best effort to notice issues there
|
||||||
log.Print(err)
|
log.Print(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -84,6 +100,7 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
|
|||||||
case "run":
|
case "run":
|
||||||
err = s.Run()
|
err = s.Run()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// Route any errors to the system logger
|
||||||
logger.Error(err)
|
logger.Error(err)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
|
|||||||
@@ -5,7 +5,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula"
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A version string that can be set with
|
// A version string that can be set with
|
||||||
@@ -39,5 +41,31 @@ func main() {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
nebula.Main(*configPath, *configTest, Build)
|
l := logrus.New()
|
||||||
|
l.Out = os.Stdout
|
||||||
|
|
||||||
|
c := config.NewC(l)
|
||||||
|
err := c.Load(*configPath)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("failed to load config: %s", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
|
||||||
|
|
||||||
|
switch v := err.(type) {
|
||||||
|
case nebula.ContextualError:
|
||||||
|
v.Log(l)
|
||||||
|
os.Exit(1)
|
||||||
|
case error:
|
||||||
|
l.WithError(err).Error("Failed to start")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !*configTest {
|
||||||
|
ctrl.Start()
|
||||||
|
ctrl.ShutdownBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,39 +1,42 @@
|
|||||||
package nebula
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/imdario/mergo"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"gopkg.in/yaml.v2"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/imdario/mergo"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Config struct {
|
type C struct {
|
||||||
path string
|
path string
|
||||||
files []string
|
files []string
|
||||||
Settings map[interface{}]interface{}
|
Settings map[interface{}]interface{}
|
||||||
oldSettings map[interface{}]interface{}
|
oldSettings map[interface{}]interface{}
|
||||||
callbacks []func(*Config)
|
callbacks []func(*C)
|
||||||
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewConfig() *Config {
|
func NewC(l *logrus.Logger) *C {
|
||||||
return &Config{
|
return &C{
|
||||||
Settings: make(map[interface{}]interface{}),
|
Settings: make(map[interface{}]interface{}),
|
||||||
|
l: l,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load will find all yaml files within path and load them in lexical order
|
// Load will find all yaml files within path and load them in lexical order
|
||||||
func (c *Config) Load(path string) error {
|
func (c *C) Load(path string) error {
|
||||||
c.path = path
|
c.path = path
|
||||||
c.files = make([]string, 0)
|
c.files = make([]string, 0)
|
||||||
|
|
||||||
@@ -56,11 +59,18 @@ func (c *Config) Load(path string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *C) LoadString(raw string) error {
|
||||||
|
if raw == "" {
|
||||||
|
return errors.New("Empty configuration")
|
||||||
|
}
|
||||||
|
return c.parseRaw([]byte(raw))
|
||||||
|
}
|
||||||
|
|
||||||
// RegisterReloadCallback stores a function to be called when a config reload is triggered. The functions registered
|
// RegisterReloadCallback stores a function to be called when a config reload is triggered. The functions registered
|
||||||
// here should decide if they need to make a change to the current process before making the change. HasChanged can be
|
// here should decide if they need to make a change to the current process before making the change. HasChanged can be
|
||||||
// used to help decide if a change is necessary.
|
// used to help decide if a change is necessary.
|
||||||
// These functions should return quickly or spawn their own go routine if they will take a while
|
// These functions should return quickly or spawn their own go routine if they will take a while
|
||||||
func (c *Config) RegisterReloadCallback(f func(*Config)) {
|
func (c *C) RegisterReloadCallback(f func(*C)) {
|
||||||
c.callbacks = append(c.callbacks, f)
|
c.callbacks = append(c.callbacks, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,7 +79,7 @@ func (c *Config) RegisterReloadCallback(f func(*Config)) {
|
|||||||
// If k is an empty string the entire config is tested.
|
// If k is an empty string the entire config is tested.
|
||||||
// It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating
|
// It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating
|
||||||
// there is change when there actually wasn't any.
|
// there is change when there actually wasn't any.
|
||||||
func (c *Config) HasChanged(k string) bool {
|
func (c *C) HasChanged(k string) bool {
|
||||||
if c.oldSettings == nil {
|
if c.oldSettings == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -90,12 +100,12 @@ func (c *Config) HasChanged(k string) bool {
|
|||||||
|
|
||||||
newVals, err := yaml.Marshal(nv)
|
newVals, err := yaml.Marshal(nv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config")
|
c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config")
|
||||||
}
|
}
|
||||||
|
|
||||||
oldVals, err := yaml.Marshal(ov)
|
oldVals, err := yaml.Marshal(ov)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config")
|
c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config")
|
||||||
}
|
}
|
||||||
|
|
||||||
return string(newVals) != string(oldVals)
|
return string(newVals) != string(oldVals)
|
||||||
@@ -103,19 +113,26 @@ func (c *Config) HasChanged(k string) bool {
|
|||||||
|
|
||||||
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
|
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
|
||||||
// original path provided to Load. The old settings are shallow copied for change detection after the reload.
|
// original path provided to Load. The old settings are shallow copied for change detection after the reload.
|
||||||
func (c *Config) CatchHUP() {
|
func (c *C) CatchHUP(ctx context.Context) {
|
||||||
ch := make(chan os.Signal, 1)
|
ch := make(chan os.Signal, 1)
|
||||||
signal.Notify(ch, syscall.SIGHUP)
|
signal.Notify(ch, syscall.SIGHUP)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
for range ch {
|
for {
|
||||||
l.Info("Caught HUP, reloading config")
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
signal.Stop(ch)
|
||||||
|
close(ch)
|
||||||
|
return
|
||||||
|
case <-ch:
|
||||||
|
c.l.Info("Caught HUP, reloading config")
|
||||||
c.ReloadConfig()
|
c.ReloadConfig()
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) ReloadConfig() {
|
func (c *C) ReloadConfig() {
|
||||||
c.oldSettings = make(map[interface{}]interface{})
|
c.oldSettings = make(map[interface{}]interface{})
|
||||||
for k, v := range c.Settings {
|
for k, v := range c.Settings {
|
||||||
c.oldSettings[k] = v
|
c.oldSettings[k] = v
|
||||||
@@ -123,7 +140,7 @@ func (c *Config) ReloadConfig() {
|
|||||||
|
|
||||||
err := c.Load(c.path)
|
err := c.Load(c.path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config")
|
c.l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -133,7 +150,7 @@ func (c *Config) ReloadConfig() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetString will get the string for k or return the default d if not found or invalid
|
// GetString will get the string for k or return the default d if not found or invalid
|
||||||
func (c *Config) GetString(k, d string) string {
|
func (c *C) GetString(k, d string) string {
|
||||||
r := c.Get(k)
|
r := c.Get(k)
|
||||||
if r == nil {
|
if r == nil {
|
||||||
return d
|
return d
|
||||||
@@ -143,7 +160,7 @@ func (c *Config) GetString(k, d string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetStringSlice will get the slice of strings for k or return the default d if not found or invalid
|
// GetStringSlice will get the slice of strings for k or return the default d if not found or invalid
|
||||||
func (c *Config) GetStringSlice(k string, d []string) []string {
|
func (c *C) GetStringSlice(k string, d []string) []string {
|
||||||
r := c.Get(k)
|
r := c.Get(k)
|
||||||
if r == nil {
|
if r == nil {
|
||||||
return d
|
return d
|
||||||
@@ -163,7 +180,7 @@ func (c *Config) GetStringSlice(k string, d []string) []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetMap will get the map for k or return the default d if not found or invalid
|
// GetMap will get the map for k or return the default d if not found or invalid
|
||||||
func (c *Config) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} {
|
func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} {
|
||||||
r := c.Get(k)
|
r := c.Get(k)
|
||||||
if r == nil {
|
if r == nil {
|
||||||
return d
|
return d
|
||||||
@@ -178,7 +195,7 @@ func (c *Config) GetMap(k string, d map[interface{}]interface{}) map[interface{}
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetInt will get the int for k or return the default d if not found or invalid
|
// GetInt will get the int for k or return the default d if not found or invalid
|
||||||
func (c *Config) GetInt(k string, d int) int {
|
func (c *C) GetInt(k string, d int) int {
|
||||||
r := c.GetString(k, strconv.Itoa(d))
|
r := c.GetString(k, strconv.Itoa(d))
|
||||||
v, err := strconv.Atoi(r)
|
v, err := strconv.Atoi(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -189,7 +206,7 @@ func (c *Config) GetInt(k string, d int) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetBool will get the bool for k or return the default d if not found or invalid
|
// GetBool will get the bool for k or return the default d if not found or invalid
|
||||||
func (c *Config) GetBool(k string, d bool) bool {
|
func (c *C) GetBool(k string, d bool) bool {
|
||||||
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
|
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
|
||||||
v, err := strconv.ParseBool(r)
|
v, err := strconv.ParseBool(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -206,7 +223,7 @@ func (c *Config) GetBool(k string, d bool) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetDuration will get the duration for k or return the default d if not found or invalid
|
// GetDuration will get the duration for k or return the default d if not found or invalid
|
||||||
func (c *Config) GetDuration(k string, d time.Duration) time.Duration {
|
func (c *C) GetDuration(k string, d time.Duration) time.Duration {
|
||||||
r := c.GetString(k, "")
|
r := c.GetString(k, "")
|
||||||
v, err := time.ParseDuration(r)
|
v, err := time.ParseDuration(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -215,138 +232,15 @@ func (c *Config) GetDuration(k string, d time.Duration) time.Duration {
|
|||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) GetAllowList(k string, allowInterfaces bool) (*AllowList, error) {
|
func (c *C) Get(k string) interface{} {
|
||||||
r := c.Get(k)
|
|
||||||
if r == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
rawMap, ok := r.(map[interface{}]interface{})
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
tree := NewCIDRTree()
|
|
||||||
var nameRules []AllowListNameRule
|
|
||||||
|
|
||||||
firstValue := true
|
|
||||||
allValuesMatch := true
|
|
||||||
defaultSet := false
|
|
||||||
var allValues bool
|
|
||||||
|
|
||||||
for rawKey, rawValue := range rawMap {
|
|
||||||
rawCIDR, ok := rawKey.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special rule for interface names
|
|
||||||
if rawCIDR == "interfaces" {
|
|
||||||
if !allowInterfaces {
|
|
||||||
return nil, fmt.Errorf("config `%s` does not support `interfaces`", k)
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
nameRules, err = c.getAllowListInterfaces(k, rawValue)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
value, ok := rawValue.(bool)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, cidr, err := net.ParseCIDR(rawCIDR)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: should we error on duplicate CIDRs in the config?
|
|
||||||
tree.AddCIDR(cidr, value)
|
|
||||||
|
|
||||||
if firstValue {
|
|
||||||
allValues = value
|
|
||||||
firstValue = false
|
|
||||||
} else {
|
|
||||||
if value != allValues {
|
|
||||||
allValuesMatch = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if this is 0.0.0.0/0
|
|
||||||
bits, size := cidr.Mask.Size()
|
|
||||||
if bits == 0 && size == 32 {
|
|
||||||
defaultSet = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !defaultSet {
|
|
||||||
if allValuesMatch {
|
|
||||||
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
|
|
||||||
tree.AddCIDR(zeroCIDR, !allValues)
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &AllowList{cidrTree: tree, nameRules: nameRules}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
|
|
||||||
var nameRules []AllowListNameRule
|
|
||||||
|
|
||||||
rawRules, ok := v.(map[interface{}]interface{})
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
firstEntry := true
|
|
||||||
var allValues bool
|
|
||||||
for rawName, rawAllow := range rawRules {
|
|
||||||
name, ok := rawName.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
|
|
||||||
}
|
|
||||||
allow, ok := rawAllow.(bool)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
|
|
||||||
}
|
|
||||||
|
|
||||||
nameRE, err := regexp.Compile("^" + name + "$")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("config `%s.interfaces` has invalid key: %s: %v", k, name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
nameRules = append(nameRules, AllowListNameRule{
|
|
||||||
Name: nameRE,
|
|
||||||
Allow: allow,
|
|
||||||
})
|
|
||||||
|
|
||||||
if firstEntry {
|
|
||||||
allValues = allow
|
|
||||||
firstEntry = false
|
|
||||||
} else {
|
|
||||||
if allow != allValues {
|
|
||||||
return nil, fmt.Errorf("config `%s.interfaces` values must all be the same true/false value", k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nameRules, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Get(k string) interface{} {
|
|
||||||
return c.get(k, c.Settings)
|
return c.get(k, c.Settings)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) IsSet(k string) bool {
|
func (c *C) IsSet(k string) bool {
|
||||||
return c.get(k, c.Settings) != nil
|
return c.get(k, c.Settings) != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) get(k string, v interface{}) interface{} {
|
func (c *C) get(k string, v interface{}) interface{} {
|
||||||
parts := strings.Split(k, ".")
|
parts := strings.Split(k, ".")
|
||||||
for _, p := range parts {
|
for _, p := range parts {
|
||||||
m, ok := v.(map[interface{}]interface{})
|
m, ok := v.(map[interface{}]interface{})
|
||||||
@@ -365,7 +259,7 @@ func (c *Config) get(k string, v interface{}) interface{} {
|
|||||||
|
|
||||||
// direct signifies if this is the config path directly specified by the user,
|
// direct signifies if this is the config path directly specified by the user,
|
||||||
// versus a file/dir found by recursing into that path
|
// versus a file/dir found by recursing into that path
|
||||||
func (c *Config) resolve(path string, direct bool) error {
|
func (c *C) resolve(path string, direct bool) error {
|
||||||
i, err := os.Stat(path)
|
i, err := os.Stat(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -391,7 +285,7 @@ func (c *Config) resolve(path string, direct bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) addFile(path string, direct bool) error {
|
func (c *C) addFile(path string, direct bool) error {
|
||||||
ext := filepath.Ext(path)
|
ext := filepath.Ext(path)
|
||||||
|
|
||||||
if !direct && ext != ".yaml" && ext != ".yml" {
|
if !direct && ext != ".yaml" && ext != ".yml" {
|
||||||
@@ -407,7 +301,19 @@ func (c *Config) addFile(path string, direct bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) parse() error {
|
func (c *C) parseRaw(b []byte) error {
|
||||||
|
var m map[interface{}]interface{}
|
||||||
|
|
||||||
|
err := yaml.Unmarshal(b, &m)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Settings = m
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *C) parse() error {
|
||||||
var m map[interface{}]interface{}
|
var m map[interface{}]interface{}
|
||||||
|
|
||||||
for _, path := range c.files {
|
for _, path := range c.files {
|
||||||
@@ -450,35 +356,3 @@ func readDirNames(path string) ([]string, error) {
|
|||||||
sort.Strings(paths)
|
sort.Strings(paths)
|
||||||
return paths, nil
|
return paths, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func configLogger(c *Config) error {
|
|
||||||
// set up our logging level
|
|
||||||
logLevel, err := logrus.ParseLevel(strings.ToLower(c.GetString("logging.level", "info")))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%s; possible levels: %s", err, logrus.AllLevels)
|
|
||||||
}
|
|
||||||
l.SetLevel(logLevel)
|
|
||||||
|
|
||||||
timestampFormat := c.GetString("logging.timestamp_format", "")
|
|
||||||
fullTimestamp := (timestampFormat != "")
|
|
||||||
if timestampFormat == "" {
|
|
||||||
timestampFormat = time.RFC3339
|
|
||||||
}
|
|
||||||
|
|
||||||
logFormat := strings.ToLower(c.GetString("logging.format", "text"))
|
|
||||||
switch logFormat {
|
|
||||||
case "text":
|
|
||||||
l.Formatter = &logrus.TextFormatter{
|
|
||||||
TimestampFormat: timestampFormat,
|
|
||||||
FullTimestamp: fullTimestamp,
|
|
||||||
}
|
|
||||||
case "json":
|
|
||||||
l.Formatter = &logrus.JSONFormatter{
|
|
||||||
TimestampFormat: timestampFormat,
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unknown log format `%s`. possible formats: %s", logFormat, []string{"text", "json"})
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,23 +1,26 @@
|
|||||||
package nebula
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestConfig_Load(t *testing.T) {
|
func TestConfig_Load(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
dir, err := ioutil.TempDir("", "config-test")
|
dir, err := ioutil.TempDir("", "config-test")
|
||||||
// invalid yaml
|
// invalid yaml
|
||||||
c := NewConfig()
|
c := NewC(l)
|
||||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
|
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
|
||||||
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
|
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
|
||||||
|
|
||||||
// simple multi config merge
|
// simple multi config merge
|
||||||
c = NewConfig()
|
c = NewC(l)
|
||||||
os.RemoveAll(dir)
|
os.RemoveAll(dir)
|
||||||
os.Mkdir(dir, 0755)
|
os.Mkdir(dir, 0755)
|
||||||
|
|
||||||
@@ -39,8 +42,9 @@ func TestConfig_Load(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConfig_Get(t *testing.T) {
|
func TestConfig_Get(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
// test simple type
|
// test simple type
|
||||||
c := NewConfig()
|
c := NewC(l)
|
||||||
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"}
|
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"}
|
||||||
assert.Equal(t, "hi", c.Get("firewall.outbound"))
|
assert.Equal(t, "hi", c.Get("firewall.outbound"))
|
||||||
|
|
||||||
@@ -54,13 +58,15 @@ func TestConfig_Get(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConfig_GetStringSlice(t *testing.T) {
|
func TestConfig_GetStringSlice(t *testing.T) {
|
||||||
c := NewConfig()
|
l := util.NewTestLogger()
|
||||||
|
c := NewC(l)
|
||||||
c.Settings["slice"] = []interface{}{"one", "two"}
|
c.Settings["slice"] = []interface{}{"one", "two"}
|
||||||
assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{}))
|
assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConfig_GetBool(t *testing.T) {
|
func TestConfig_GetBool(t *testing.T) {
|
||||||
c := NewConfig()
|
l := util.NewTestLogger()
|
||||||
|
c := NewC(l)
|
||||||
c.Settings["bool"] = true
|
c.Settings["bool"] = true
|
||||||
assert.Equal(t, true, c.GetBool("bool", false))
|
assert.Equal(t, true, c.GetBool("bool", false))
|
||||||
|
|
||||||
@@ -86,91 +92,22 @@ func TestConfig_GetBool(t *testing.T) {
|
|||||||
assert.Equal(t, false, c.GetBool("bool", true))
|
assert.Equal(t, false, c.GetBool("bool", true))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConfig_GetAllowList(t *testing.T) {
|
|
||||||
c := NewConfig()
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"192.168.0.0": true,
|
|
||||||
}
|
|
||||||
r, err := c.GetAllowList("allowlist", false)
|
|
||||||
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
|
|
||||||
assert.Nil(t, r)
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"192.168.0.0/16": "abc",
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", false)
|
|
||||||
assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"192.168.0.0/16": true,
|
|
||||||
"10.0.0.0/8": false,
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", false)
|
|
||||||
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"0.0.0.0/0": true,
|
|
||||||
"10.0.0.0/8": false,
|
|
||||||
"10.42.42.0/24": true,
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", false)
|
|
||||||
if assert.NoError(t, err) {
|
|
||||||
assert.NotNil(t, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test interface names
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"interfaces": map[interface{}]interface{}{
|
|
||||||
`docker.*`: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", false)
|
|
||||||
assert.EqualError(t, err, "config `allowlist` does not support `interfaces`")
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"interfaces": map[interface{}]interface{}{
|
|
||||||
`docker.*`: "foo",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", true)
|
|
||||||
assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"interfaces": map[interface{}]interface{}{
|
|
||||||
`docker.*`: false,
|
|
||||||
`eth.*`: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", true)
|
|
||||||
assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
|
|
||||||
|
|
||||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
|
||||||
"interfaces": map[interface{}]interface{}{
|
|
||||||
`docker.*`: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r, err = c.GetAllowList("allowlist", true)
|
|
||||||
if assert.NoError(t, err) {
|
|
||||||
assert.NotNil(t, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_HasChanged(t *testing.T) {
|
func TestConfig_HasChanged(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
// No reload has occurred, return false
|
// No reload has occurred, return false
|
||||||
c := NewConfig()
|
c := NewC(l)
|
||||||
c.Settings["test"] = "hi"
|
c.Settings["test"] = "hi"
|
||||||
assert.False(t, c.HasChanged(""))
|
assert.False(t, c.HasChanged(""))
|
||||||
|
|
||||||
// Test key change
|
// Test key change
|
||||||
c = NewConfig()
|
c = NewC(l)
|
||||||
c.Settings["test"] = "hi"
|
c.Settings["test"] = "hi"
|
||||||
c.oldSettings = map[interface{}]interface{}{"test": "no"}
|
c.oldSettings = map[interface{}]interface{}{"test": "no"}
|
||||||
assert.True(t, c.HasChanged("test"))
|
assert.True(t, c.HasChanged("test"))
|
||||||
assert.True(t, c.HasChanged(""))
|
assert.True(t, c.HasChanged(""))
|
||||||
|
|
||||||
// No key change
|
// No key change
|
||||||
c = NewConfig()
|
c = NewC(l)
|
||||||
c.Settings["test"] = "hi"
|
c.Settings["test"] = "hi"
|
||||||
c.oldSettings = map[interface{}]interface{}{"test": "hi"}
|
c.oldSettings = map[interface{}]interface{}{"test": "hi"}
|
||||||
assert.False(t, c.HasChanged("test"))
|
assert.False(t, c.HasChanged("test"))
|
||||||
@@ -178,12 +115,13 @@ func TestConfig_HasChanged(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConfig_ReloadConfig(t *testing.T) {
|
func TestConfig_ReloadConfig(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
done := make(chan bool, 1)
|
done := make(chan bool, 1)
|
||||||
dir, err := ioutil.TempDir("", "config-test")
|
dir, err := ioutil.TempDir("", "config-test")
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
||||||
|
|
||||||
c := NewConfig()
|
c := NewC(l)
|
||||||
assert.Nil(t, c.Load(dir))
|
assert.Nil(t, c.Load(dir))
|
||||||
|
|
||||||
assert.False(t, c.HasChanged("outer.inner"))
|
assert.False(t, c.HasChanged("outer.inner"))
|
||||||
@@ -192,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
|
|||||||
|
|
||||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
|
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
|
||||||
|
|
||||||
c.RegisterReloadCallback(func(c *Config) {
|
c.RegisterReloadCallback(func(c *C) {
|
||||||
done <- true
|
done <- true
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -1,10 +1,13 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
|
// TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
|
||||||
@@ -12,47 +15,49 @@ import (
|
|||||||
|
|
||||||
type connectionManager struct {
|
type connectionManager struct {
|
||||||
hostMap *HostMap
|
hostMap *HostMap
|
||||||
in map[uint32]struct{}
|
in map[iputil.VpnIp]struct{}
|
||||||
inLock *sync.RWMutex
|
inLock *sync.RWMutex
|
||||||
inCount int
|
inCount int
|
||||||
out map[uint32]struct{}
|
out map[iputil.VpnIp]struct{}
|
||||||
outLock *sync.RWMutex
|
outLock *sync.RWMutex
|
||||||
outCount int
|
outCount int
|
||||||
TrafficTimer *SystemTimerWheel
|
TrafficTimer *SystemTimerWheel
|
||||||
intf *Interface
|
intf *Interface
|
||||||
|
|
||||||
pendingDeletion map[uint32]int
|
pendingDeletion map[iputil.VpnIp]int
|
||||||
pendingDeletionLock *sync.RWMutex
|
pendingDeletionLock *sync.RWMutex
|
||||||
pendingDeletionTimer *SystemTimerWheel
|
pendingDeletionTimer *SystemTimerWheel
|
||||||
|
|
||||||
checkInterval int
|
checkInterval int
|
||||||
pendingDeletionInterval int
|
pendingDeletionInterval int
|
||||||
|
|
||||||
|
l *logrus.Logger
|
||||||
// I wanted to call one matLock
|
// I wanted to call one matLock
|
||||||
}
|
}
|
||||||
|
|
||||||
func newConnectionManager(intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
|
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
|
||||||
nc := &connectionManager{
|
nc := &connectionManager{
|
||||||
hostMap: intf.hostMap,
|
hostMap: intf.hostMap,
|
||||||
in: make(map[uint32]struct{}),
|
in: make(map[iputil.VpnIp]struct{}),
|
||||||
inLock: &sync.RWMutex{},
|
inLock: &sync.RWMutex{},
|
||||||
inCount: 0,
|
inCount: 0,
|
||||||
out: make(map[uint32]struct{}),
|
out: make(map[iputil.VpnIp]struct{}),
|
||||||
outLock: &sync.RWMutex{},
|
outLock: &sync.RWMutex{},
|
||||||
outCount: 0,
|
outCount: 0,
|
||||||
TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
||||||
intf: intf,
|
intf: intf,
|
||||||
pendingDeletion: make(map[uint32]int),
|
pendingDeletion: make(map[iputil.VpnIp]int),
|
||||||
pendingDeletionLock: &sync.RWMutex{},
|
pendingDeletionLock: &sync.RWMutex{},
|
||||||
pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
|
||||||
checkInterval: checkInterval,
|
checkInterval: checkInterval,
|
||||||
pendingDeletionInterval: pendingDeletionInterval,
|
pendingDeletionInterval: pendingDeletionInterval,
|
||||||
|
l: l,
|
||||||
}
|
}
|
||||||
nc.Start()
|
nc.Start(ctx)
|
||||||
return nc
|
return nc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) In(ip uint32) {
|
func (n *connectionManager) In(ip iputil.VpnIp) {
|
||||||
n.inLock.RLock()
|
n.inLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.in[ip]; ok {
|
if _, ok := n.in[ip]; ok {
|
||||||
@@ -65,7 +70,7 @@ func (n *connectionManager) In(ip uint32) {
|
|||||||
n.inLock.Unlock()
|
n.inLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Out(ip uint32) {
|
func (n *connectionManager) Out(ip iputil.VpnIp) {
|
||||||
n.outLock.RLock()
|
n.outLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.out[ip]; ok {
|
if _, ok := n.out[ip]; ok {
|
||||||
@@ -84,9 +89,9 @@ func (n *connectionManager) Out(ip uint32) {
|
|||||||
n.outLock.Unlock()
|
n.outLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) CheckIn(vpnIP uint32) bool {
|
func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool {
|
||||||
n.inLock.RLock()
|
n.inLock.RLock()
|
||||||
if _, ok := n.in[vpnIP]; ok {
|
if _, ok := n.in[vpnIp]; ok {
|
||||||
n.inLock.RUnlock()
|
n.inLock.RUnlock()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -94,7 +99,7 @@ func (n *connectionManager) CheckIn(vpnIP uint32) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) ClearIP(ip uint32) {
|
func (n *connectionManager) ClearIP(ip iputil.VpnIp) {
|
||||||
n.inLock.Lock()
|
n.inLock.Lock()
|
||||||
n.outLock.Lock()
|
n.outLock.Lock()
|
||||||
delete(n.in, ip)
|
delete(n.in, ip)
|
||||||
@@ -103,13 +108,13 @@ func (n *connectionManager) ClearIP(ip uint32) {
|
|||||||
n.outLock.Unlock()
|
n.outLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) ClearPendingDeletion(ip uint32) {
|
func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) {
|
||||||
n.pendingDeletionLock.Lock()
|
n.pendingDeletionLock.Lock()
|
||||||
delete(n.pendingDeletion, ip)
|
delete(n.pendingDeletion, ip)
|
||||||
n.pendingDeletionLock.Unlock()
|
n.pendingDeletionLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) AddPendingDeletion(ip uint32) {
|
func (n *connectionManager) AddPendingDeletion(ip iputil.VpnIp) {
|
||||||
n.pendingDeletionLock.Lock()
|
n.pendingDeletionLock.Lock()
|
||||||
if _, ok := n.pendingDeletion[ip]; ok {
|
if _, ok := n.pendingDeletion[ip]; ok {
|
||||||
n.pendingDeletion[ip] += 1
|
n.pendingDeletion[ip] += 1
|
||||||
@@ -120,7 +125,7 @@ func (n *connectionManager) AddPendingDeletion(ip uint32) {
|
|||||||
n.pendingDeletionLock.Unlock()
|
n.pendingDeletionLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) checkPendingDeletion(ip uint32) bool {
|
func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool {
|
||||||
n.pendingDeletionLock.RLock()
|
n.pendingDeletionLock.RLock()
|
||||||
if _, ok := n.pendingDeletion[ip]; ok {
|
if _, ok := n.pendingDeletion[ip]; ok {
|
||||||
|
|
||||||
@@ -131,24 +136,34 @@ func (n *connectionManager) checkPendingDeletion(ip uint32) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) AddTrafficWatch(vpnIP uint32, seconds int) {
|
func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) {
|
||||||
n.TrafficTimer.Add(vpnIP, time.Second*time.Duration(seconds))
|
n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Start() {
|
func (n *connectionManager) Start(ctx context.Context) {
|
||||||
go n.Run()
|
go n.Run(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Run() {
|
func (n *connectionManager) Run(ctx context.Context) {
|
||||||
clockSource := time.Tick(500 * time.Millisecond)
|
clockSource := time.NewTicker(500 * time.Millisecond)
|
||||||
|
defer clockSource.Stop()
|
||||||
|
|
||||||
for now := range clockSource {
|
p := []byte("")
|
||||||
n.HandleMonitorTick(now)
|
nb := make([]byte, 12, 12)
|
||||||
|
out := make([]byte, mtu)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case now := <-clockSource.C:
|
||||||
|
n.HandleMonitorTick(now, p, nb, out)
|
||||||
n.HandleDeletionTick(now)
|
n.HandleDeletionTick(now)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) HandleMonitorTick(now time.Time) {
|
func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) {
|
||||||
n.TrafficTimer.advance(now)
|
n.TrafficTimer.advance(now)
|
||||||
for {
|
for {
|
||||||
ep := n.TrafficTimer.Purge()
|
ep := n.TrafficTimer.Purge()
|
||||||
@@ -156,44 +171,51 @@ func (n *connectionManager) HandleMonitorTick(now time.Time) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIP := ep.(uint32)
|
vpnIp := ep.(iputil.VpnIp)
|
||||||
|
|
||||||
// Check for traffic coming back in from this host.
|
// Check for traffic coming back in from this host.
|
||||||
traf := n.CheckIn(vpnIP)
|
traf := n.CheckIn(vpnIp)
|
||||||
|
|
||||||
// If we saw incoming packets from this ip, just return
|
hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
|
||||||
|
if err != nil {
|
||||||
|
n.l.Debugf("Not found in hostmap: %s", vpnIp)
|
||||||
|
|
||||||
|
if !n.intf.disconnectInvalid {
|
||||||
|
n.ClearIP(vpnIp)
|
||||||
|
n.ClearPendingDeletion(vpnIp)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we saw an incoming packets from this ip and peer's certificate is not
|
||||||
|
// expired, just ignore.
|
||||||
if traf {
|
if traf {
|
||||||
if l.Level >= logrus.DebugLevel {
|
if n.l.Level >= logrus.DebugLevel {
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).
|
n.l.WithField("vpnIp", vpnIp).
|
||||||
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
||||||
Debug("Tunnel status")
|
Debug("Tunnel status")
|
||||||
}
|
}
|
||||||
n.ClearIP(vpnIP)
|
n.ClearIP(vpnIp)
|
||||||
n.ClearPendingDeletion(vpnIP)
|
n.ClearPendingDeletion(vpnIp)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we didn't we may need to probe or destroy the conn
|
hostinfo.logger(n.l).
|
||||||
hostinfo, err := n.hostMap.QueryVpnIP(vpnIP)
|
|
||||||
if err != nil {
|
|
||||||
l.Debugf("Not found in hostmap: %s", IntIp(vpnIP))
|
|
||||||
n.ClearIP(vpnIP)
|
|
||||||
n.ClearPendingDeletion(vpnIP)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
hostinfo.logger().
|
|
||||||
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
||||||
Debug("Tunnel status")
|
Debug("Tunnel status")
|
||||||
|
|
||||||
if hostinfo != nil && hostinfo.ConnectionState != nil {
|
if hostinfo != nil && hostinfo.ConnectionState != nil {
|
||||||
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
||||||
n.intf.SendMessageToVpnIp(test, testRequest, vpnIP, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
hostinfo.logger().Debugf("Hostinfo sadness: %s", IntIp(vpnIP))
|
hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp)
|
||||||
}
|
}
|
||||||
n.AddPendingDeletion(vpnIP)
|
n.AddPendingDeletion(vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -206,49 +228,88 @@ func (n *connectionManager) HandleDeletionTick(now time.Time) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIP := ep.(uint32)
|
vpnIp := ep.(iputil.VpnIp)
|
||||||
|
|
||||||
// If we saw incoming packets from this ip, just return
|
hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
|
||||||
traf := n.CheckIn(vpnIP)
|
if err != nil {
|
||||||
if traf {
|
n.l.Debugf("Not found in hostmap: %s", vpnIp)
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).
|
|
||||||
WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
|
if !n.intf.disconnectInvalid {
|
||||||
Debug("Tunnel status")
|
n.ClearIP(vpnIp)
|
||||||
n.ClearIP(vpnIP)
|
n.ClearPendingDeletion(vpnIp)
|
||||||
n.ClearPendingDeletion(vpnIP)
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo, err := n.hostMap.QueryVpnIP(vpnIP)
|
// If we saw an incoming packets from this ip and peer's certificate is not
|
||||||
if err != nil {
|
// expired, just ignore.
|
||||||
n.ClearIP(vpnIP)
|
traf := n.CheckIn(vpnIp)
|
||||||
n.ClearPendingDeletion(vpnIP)
|
if traf {
|
||||||
l.Debugf("Not found in hostmap: %s", IntIp(vpnIP))
|
n.l.WithField("vpnIp", vpnIp).
|
||||||
|
WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
|
||||||
|
Debug("Tunnel status")
|
||||||
|
|
||||||
|
n.ClearIP(vpnIp)
|
||||||
|
n.ClearPendingDeletion(vpnIp)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// If it comes around on deletion wheel and hasn't resolved itself, delete
|
// If it comes around on deletion wheel and hasn't resolved itself, delete
|
||||||
if n.checkPendingDeletion(vpnIP) {
|
if n.checkPendingDeletion(vpnIp) {
|
||||||
cn := ""
|
cn := ""
|
||||||
if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
|
if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
|
||||||
cn = hostinfo.ConnectionState.peerCert.Details.Name
|
cn = hostinfo.ConnectionState.peerCert.Details.Name
|
||||||
}
|
}
|
||||||
hostinfo.logger().
|
hostinfo.logger(n.l).
|
||||||
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
||||||
WithField("certName", cn).
|
WithField("certName", cn).
|
||||||
Info("Tunnel status")
|
Info("Tunnel status")
|
||||||
|
|
||||||
n.ClearIP(vpnIP)
|
n.ClearIP(vpnIp)
|
||||||
n.ClearPendingDeletion(vpnIP)
|
n.ClearPendingDeletion(vpnIp)
|
||||||
// TODO: This is only here to let tests work. Should do proper mocking
|
// TODO: This is only here to let tests work. Should do proper mocking
|
||||||
if n.intf.lightHouse != nil {
|
if n.intf.lightHouse != nil {
|
||||||
n.intf.lightHouse.DeleteVpnIP(vpnIP)
|
n.intf.lightHouse.DeleteVpnIp(vpnIp)
|
||||||
}
|
}
|
||||||
n.hostMap.DeleteVpnIP(vpnIP)
|
n.hostMap.DeleteHostInfo(hostinfo)
|
||||||
n.hostMap.DeleteIndex(hostinfo.localIndexId)
|
|
||||||
} else {
|
} else {
|
||||||
n.ClearIP(vpnIP)
|
n.ClearIP(vpnIp)
|
||||||
n.ClearPendingDeletion(vpnIP)
|
n.ClearPendingDeletion(vpnIp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// handleInvalidCertificates will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid
|
||||||
|
func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool {
|
||||||
|
if !n.intf.disconnectInvalid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteCert := hostinfo.GetCert()
|
||||||
|
if remoteCert == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
valid, err := remoteCert.Verify(now, n.intf.caPool)
|
||||||
|
if valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
fingerprint, _ := remoteCert.Sha256Sum()
|
||||||
|
n.l.WithField("vpnIp", vpnIp).WithError(err).
|
||||||
|
WithField("certName", remoteCert.Details.Name).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
||||||
|
|
||||||
|
// Inform the remote and close the tunnel locally
|
||||||
|
n.intf.sendCloseTunnel(hostinfo)
|
||||||
|
n.intf.closeTunnel(hostinfo, false)
|
||||||
|
|
||||||
|
n.ClearIP(vpnIp)
|
||||||
|
n.ClearPendingDeletion(vpnIp)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,26 +1,33 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/ed25519"
|
||||||
|
"crypto/rand"
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
var vpnIP uint32
|
var vpnIp iputil.VpnIp
|
||||||
|
|
||||||
func Test_NewConnectionManagerTest(t *testing.T) {
|
func Test_NewConnectionManagerTest(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||||
vpnIP = ip2int(net.ParseIP("172.1.1.2"))
|
vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
|
|
||||||
// Very incomplete mock objects
|
// Very incomplete mock objects
|
||||||
hostMap := NewHostMap("test", vpncidr, preferredRanges)
|
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||||
cs := &CertState{
|
cs := &CertState{
|
||||||
rawCertificate: []byte{},
|
rawCertificate: []byte{},
|
||||||
privateKey: []byte{},
|
privateKey: []byte{},
|
||||||
@@ -28,62 +35,68 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||||||
rawCertificateNoKey: []byte{},
|
rawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := NewLightHouse(false, 0, []uint32{}, 1000, 0, &udpConn{}, false, 1)
|
lh := NewLightHouse(l, false, &net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{}, 1000, 0, &udp.Conn{}, false, 1, false)
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &Tun{},
|
inside: &Tun{},
|
||||||
outside: &udpConn{},
|
outside: &udp.Conn{},
|
||||||
certState: cs,
|
certState: cs,
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(vpncidr, preferredRanges, hostMap, lh, &udpConn{}, defaultHandshakeConfig),
|
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||||
|
l: l,
|
||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
nc := newConnectionManager(ifce, 5, 10)
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
nc.HandleMonitorTick(now)
|
defer cancel()
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
||||||
|
p := []byte("")
|
||||||
|
nb := make([]byte, 12, 12)
|
||||||
|
out := make([]byte, mtu)
|
||||||
|
nc.HandleMonitorTick(now, p, nb, out)
|
||||||
// Add an ip we have established a connection w/ to hostmap
|
// Add an ip we have established a connection w/ to hostmap
|
||||||
hostinfo := nc.hostMap.AddVpnIP(vpnIP)
|
hostinfo := nc.hostMap.AddVpnIp(vpnIp)
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
certState: cs,
|
certState: cs,
|
||||||
H: &noise.HandshakeState{},
|
H: &noise.HandshakeState{},
|
||||||
messageCounter: new(uint64),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We saw traffic out to vpnIP
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(vpnIP)
|
nc.Out(vpnIp)
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIP)
|
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
|
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
||||||
// Move ahead 5s. Nothing should happen
|
// Move ahead 5s. Nothing should happen
|
||||||
next_tick := now.Add(5 * time.Second)
|
next_tick := now.Add(5 * time.Second)
|
||||||
nc.HandleMonitorTick(next_tick)
|
nc.HandleMonitorTick(next_tick, p, nb, out)
|
||||||
nc.HandleDeletionTick(next_tick)
|
nc.HandleDeletionTick(next_tick)
|
||||||
// Move ahead 6s. We haven't heard back
|
// Move ahead 6s. We haven't heard back
|
||||||
next_tick = now.Add(6 * time.Second)
|
next_tick = now.Add(6 * time.Second)
|
||||||
nc.HandleMonitorTick(next_tick)
|
nc.HandleMonitorTick(next_tick, p, nb, out)
|
||||||
nc.HandleDeletionTick(next_tick)
|
nc.HandleDeletionTick(next_tick)
|
||||||
// This host should now be up for deletion
|
// This host should now be up for deletion
|
||||||
assert.Contains(t, nc.pendingDeletion, vpnIP)
|
assert.Contains(t, nc.pendingDeletion, vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
|
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
||||||
// Move ahead some more
|
// Move ahead some more
|
||||||
next_tick = now.Add(45 * time.Second)
|
next_tick = now.Add(45 * time.Second)
|
||||||
nc.HandleMonitorTick(next_tick)
|
nc.HandleMonitorTick(next_tick, p, nb, out)
|
||||||
nc.HandleDeletionTick(next_tick)
|
nc.HandleDeletionTick(next_tick)
|
||||||
// The host should be evicted
|
// The host should be evicted
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIP)
|
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
||||||
assert.NotContains(t, nc.hostMap.Hosts, vpnIP)
|
assert.NotContains(t, nc.hostMap.Hosts, vpnIp)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_NewConnectionManagerTest2(t *testing.T) {
|
func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
|
|
||||||
// Very incomplete mock objects
|
// Very incomplete mock objects
|
||||||
hostMap := NewHostMap("test", vpncidr, preferredRanges)
|
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||||
cs := &CertState{
|
cs := &CertState{
|
||||||
rawCertificate: []byte{},
|
rawCertificate: []byte{},
|
||||||
privateKey: []byte{},
|
privateKey: []byte{},
|
||||||
@@ -91,52 +104,152 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||||||
rawCertificateNoKey: []byte{},
|
rawCertificateNoKey: []byte{},
|
||||||
}
|
}
|
||||||
|
|
||||||
lh := NewLightHouse(false, 0, []uint32{}, 1000, 0, &udpConn{}, false, 1)
|
lh := NewLightHouse(l, false, &net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{}, 1000, 0, &udp.Conn{}, false, 1, false)
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: hostMap,
|
hostMap: hostMap,
|
||||||
inside: &Tun{},
|
inside: &Tun{},
|
||||||
outside: &udpConn{},
|
outside: &udp.Conn{},
|
||||||
certState: cs,
|
certState: cs,
|
||||||
firewall: &Firewall{},
|
firewall: &Firewall{},
|
||||||
lightHouse: lh,
|
lightHouse: lh,
|
||||||
handshakeManager: NewHandshakeManager(vpncidr, preferredRanges, hostMap, lh, &udpConn{}, defaultHandshakeConfig),
|
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||||
|
l: l,
|
||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
nc := newConnectionManager(ifce, 5, 10)
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
nc.HandleMonitorTick(now)
|
defer cancel()
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
||||||
|
p := []byte("")
|
||||||
|
nb := make([]byte, 12, 12)
|
||||||
|
out := make([]byte, mtu)
|
||||||
|
nc.HandleMonitorTick(now, p, nb, out)
|
||||||
// Add an ip we have established a connection w/ to hostmap
|
// Add an ip we have established a connection w/ to hostmap
|
||||||
hostinfo := nc.hostMap.AddVpnIP(vpnIP)
|
hostinfo := nc.hostMap.AddVpnIp(vpnIp)
|
||||||
hostinfo.ConnectionState = &ConnectionState{
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
certState: cs,
|
certState: cs,
|
||||||
H: &noise.HandshakeState{},
|
H: &noise.HandshakeState{},
|
||||||
messageCounter: new(uint64),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We saw traffic out to vpnIP
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(vpnIP)
|
nc.Out(vpnIp)
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIP)
|
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
|
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
||||||
// Move ahead 5s. Nothing should happen
|
// Move ahead 5s. Nothing should happen
|
||||||
next_tick := now.Add(5 * time.Second)
|
next_tick := now.Add(5 * time.Second)
|
||||||
nc.HandleMonitorTick(next_tick)
|
nc.HandleMonitorTick(next_tick, p, nb, out)
|
||||||
nc.HandleDeletionTick(next_tick)
|
nc.HandleDeletionTick(next_tick)
|
||||||
// Move ahead 6s. We haven't heard back
|
// Move ahead 6s. We haven't heard back
|
||||||
next_tick = now.Add(6 * time.Second)
|
next_tick = now.Add(6 * time.Second)
|
||||||
nc.HandleMonitorTick(next_tick)
|
nc.HandleMonitorTick(next_tick, p, nb, out)
|
||||||
nc.HandleDeletionTick(next_tick)
|
nc.HandleDeletionTick(next_tick)
|
||||||
// This host should now be up for deletion
|
// This host should now be up for deletion
|
||||||
assert.Contains(t, nc.pendingDeletion, vpnIP)
|
assert.Contains(t, nc.pendingDeletion, vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
|
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
||||||
// We heard back this time
|
// We heard back this time
|
||||||
nc.In(vpnIP)
|
nc.In(vpnIp)
|
||||||
// Move ahead some more
|
// Move ahead some more
|
||||||
next_tick = now.Add(45 * time.Second)
|
next_tick = now.Add(45 * time.Second)
|
||||||
nc.HandleMonitorTick(next_tick)
|
nc.HandleMonitorTick(next_tick, p, nb, out)
|
||||||
nc.HandleDeletionTick(next_tick)
|
nc.HandleDeletionTick(next_tick)
|
||||||
// The host should be evicted
|
// The host should be evicted
|
||||||
assert.NotContains(t, nc.pendingDeletion, vpnIP)
|
assert.NotContains(t, nc.pendingDeletion, vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
|
assert.Contains(t, nc.hostMap.Hosts, vpnIp)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check if we can disconnect the peer.
|
||||||
|
// Validate if the peer's certificate is invalid (expired, etc.)
|
||||||
|
// Disconnect only if disconnectInvalid: true is set.
|
||||||
|
func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
l := util.NewTestLogger()
|
||||||
|
ipNet := net.IPNet{
|
||||||
|
IP: net.IPv4(172, 1, 1, 2),
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
}
|
||||||
|
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
|
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||||
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
|
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||||
|
|
||||||
|
// Generate keys for CA and peer's cert.
|
||||||
|
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
|
||||||
|
caCert := cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "ca",
|
||||||
|
NotBefore: now,
|
||||||
|
NotAfter: now.Add(1 * time.Hour),
|
||||||
|
IsCA: true,
|
||||||
|
PublicKey: pubCA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
caCert.Sign(privCA)
|
||||||
|
ncp := &cert.NebulaCAPool{
|
||||||
|
CAs: cert.NewCAPool().CAs,
|
||||||
|
}
|
||||||
|
ncp.CAs["ca"] = &caCert
|
||||||
|
|
||||||
|
pubCrt, _, _ := ed25519.GenerateKey(rand.Reader)
|
||||||
|
peerCert := cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "host",
|
||||||
|
Ips: []*net.IPNet{&ipNet},
|
||||||
|
Subnets: []*net.IPNet{},
|
||||||
|
NotBefore: now,
|
||||||
|
NotAfter: now.Add(60 * time.Second),
|
||||||
|
PublicKey: pubCrt,
|
||||||
|
IsCA: false,
|
||||||
|
Issuer: "ca",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
peerCert.Sign(privCA)
|
||||||
|
|
||||||
|
cs := &CertState{
|
||||||
|
rawCertificate: []byte{},
|
||||||
|
privateKey: []byte{},
|
||||||
|
certificate: &cert.NebulaCertificate{},
|
||||||
|
rawCertificateNoKey: []byte{},
|
||||||
|
}
|
||||||
|
|
||||||
|
lh := NewLightHouse(l, false, &net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{}, 1000, 0, &udp.Conn{}, false, 1, false)
|
||||||
|
ifce := &Interface{
|
||||||
|
hostMap: hostMap,
|
||||||
|
inside: &Tun{},
|
||||||
|
outside: &udp.Conn{},
|
||||||
|
certState: cs,
|
||||||
|
firewall: &Firewall{},
|
||||||
|
lightHouse: lh,
|
||||||
|
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||||
|
l: l,
|
||||||
|
disconnectInvalid: true,
|
||||||
|
caPool: ncp,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create manager
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
nc := newConnectionManager(ctx, l, ifce, 5, 10)
|
||||||
|
ifce.connectionManager = nc
|
||||||
|
hostinfo := nc.hostMap.AddVpnIp(vpnIp)
|
||||||
|
hostinfo.ConnectionState = &ConnectionState{
|
||||||
|
certState: cs,
|
||||||
|
peerCert: &peerCert,
|
||||||
|
H: &noise.HandshakeState{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move ahead 45s.
|
||||||
|
// Check if to disconnect with invalid certificate.
|
||||||
|
// Should be alive.
|
||||||
|
nextTick := now.Add(45 * time.Second)
|
||||||
|
destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
|
||||||
|
assert.False(t, destroyed)
|
||||||
|
|
||||||
|
// Move ahead 61s.
|
||||||
|
// Check if to disconnect with invalid certificate.
|
||||||
|
// Should be disconnected.
|
||||||
|
nextTick = now.Add(61 * time.Second)
|
||||||
|
destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
|
||||||
|
assert.True(t, destroyed)
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,8 +4,10 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -18,14 +20,14 @@ type ConnectionState struct {
|
|||||||
certState *CertState
|
certState *CertState
|
||||||
peerCert *cert.NebulaCertificate
|
peerCert *cert.NebulaCertificate
|
||||||
initiator bool
|
initiator bool
|
||||||
messageCounter *uint64
|
atomicMessageCounter uint64
|
||||||
window *Bits
|
window *Bits
|
||||||
queueLock sync.Mutex
|
queueLock sync.Mutex
|
||||||
writeLock sync.Mutex
|
writeLock sync.Mutex
|
||||||
ready bool
|
ready bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
|
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, p []byte) (*ConnectionState, error) {
|
||||||
cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
|
cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
|
||||||
if f.cipher == "chachapoly" {
|
if f.cipher == "chachapoly" {
|
||||||
cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
|
cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
|
||||||
@@ -36,19 +38,20 @@ func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePa
|
|||||||
|
|
||||||
b := NewBits(ReplayWindow)
|
b := NewBits(ReplayWindow)
|
||||||
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss
|
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss
|
||||||
b.Update(0)
|
b.Update(l, 0)
|
||||||
|
|
||||||
hs, err := noise.NewHandshakeState(noise.Config{
|
hs, err := noise.NewHandshakeState(noise.Config{
|
||||||
CipherSuite: cs,
|
CipherSuite: cs,
|
||||||
Random: rand.Reader,
|
Random: rand.Reader,
|
||||||
Pattern: pattern,
|
Pattern: noise.HandshakeIX,
|
||||||
Initiator: initiator,
|
Initiator: initiator,
|
||||||
StaticKeypair: static,
|
StaticKeypair: static,
|
||||||
PresharedKey: psk,
|
PresharedKey: p,
|
||||||
PresharedKeyPlacement: pskStage,
|
PresharedKeyPlacement: 0,
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// The queue and ready params prevent a counter race that would happen when
|
// The queue and ready params prevent a counter race that would happen when
|
||||||
@@ -59,17 +62,16 @@ func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePa
|
|||||||
window: b,
|
window: b,
|
||||||
ready: false,
|
ready: false,
|
||||||
certState: curCertState,
|
certState: curCertState,
|
||||||
messageCounter: new(uint64),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ci
|
return ci, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
|
func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(m{
|
return json.Marshal(m{
|
||||||
"certificate": cs.peerCert,
|
"certificate": cs.peerCert,
|
||||||
"initiator": cs.initiator,
|
"initiator": cs.initiator,
|
||||||
"message_counter": cs.messageCounter,
|
"message_counter": atomic.LoadUint64(&cs.atomicMessageCounter),
|
||||||
"ready": cs.ready,
|
"ready": cs.ready,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
215
control.go
Normal file
215
control.go
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sync/atomic"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
|
||||||
|
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc
|
||||||
|
|
||||||
|
type Control struct {
|
||||||
|
f *Interface
|
||||||
|
l *logrus.Logger
|
||||||
|
cancel context.CancelFunc
|
||||||
|
sshStart func()
|
||||||
|
statsStart func()
|
||||||
|
dnsStart func()
|
||||||
|
}
|
||||||
|
|
||||||
|
type ControlHostInfo struct {
|
||||||
|
VpnIp net.IP `json:"vpnIp"`
|
||||||
|
LocalIndex uint32 `json:"localIndex"`
|
||||||
|
RemoteIndex uint32 `json:"remoteIndex"`
|
||||||
|
RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
|
||||||
|
CachedPackets int `json:"cachedPackets"`
|
||||||
|
Cert *cert.NebulaCertificate `json:"cert"`
|
||||||
|
MessageCounter uint64 `json:"messageCounter"`
|
||||||
|
CurrentRemote *udp.Addr `json:"currentRemote"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
|
||||||
|
func (c *Control) Start() {
|
||||||
|
// Activate the interface
|
||||||
|
c.f.activate()
|
||||||
|
|
||||||
|
// Call all the delayed funcs that waited patiently for the interface to be created.
|
||||||
|
if c.sshStart != nil {
|
||||||
|
go c.sshStart()
|
||||||
|
}
|
||||||
|
if c.statsStart != nil {
|
||||||
|
go c.statsStart()
|
||||||
|
}
|
||||||
|
if c.dnsStart != nil {
|
||||||
|
go c.dnsStart()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start reading packets.
|
||||||
|
c.f.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop signals nebula to shutdown, returns after the shutdown is complete
|
||||||
|
func (c *Control) Stop() {
|
||||||
|
//TODO: stop tun and udp routines, the lock on hostMap effectively does that though
|
||||||
|
c.CloseAllTunnels(false)
|
||||||
|
c.cancel()
|
||||||
|
c.l.Info("Goodbye")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
|
||||||
|
func (c *Control) ShutdownBlock() {
|
||||||
|
sigChan := make(chan os.Signal)
|
||||||
|
signal.Notify(sigChan, syscall.SIGTERM)
|
||||||
|
signal.Notify(sigChan, syscall.SIGINT)
|
||||||
|
|
||||||
|
rawSig := <-sigChan
|
||||||
|
sig := rawSig.String()
|
||||||
|
c.l.WithField("signal", sig).Info("Caught signal, shutting down")
|
||||||
|
c.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// RebindUDPServer asks the UDP listener to rebind it's listener. Mainly used on mobile clients when interfaces change
|
||||||
|
func (c *Control) RebindUDPServer() {
|
||||||
|
_ = c.f.outside.Rebind()
|
||||||
|
|
||||||
|
// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
|
||||||
|
c.f.lightHouse.SendUpdate(c.f)
|
||||||
|
|
||||||
|
// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
|
||||||
|
c.f.rebindCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListHostmap returns details about the actual or pending (handshaking) hostmap
|
||||||
|
func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo {
|
||||||
|
if pendingMap {
|
||||||
|
return listHostMap(c.f.handshakeManager.pendingHostMap)
|
||||||
|
} else {
|
||||||
|
return listHostMap(c.f.hostMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
|
||||||
|
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
|
||||||
|
var hm *HostMap
|
||||||
|
if pending {
|
||||||
|
hm = c.f.handshakeManager.pendingHostMap
|
||||||
|
} else {
|
||||||
|
hm = c.f.hostMap
|
||||||
|
}
|
||||||
|
|
||||||
|
h, err := hm.QueryVpnIp(vpnIp)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
|
||||||
|
return &ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRemoteForTunnel forces a tunnel to use a specific remote
|
||||||
|
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
|
||||||
|
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
hostInfo.SetRemote(addr.Copy())
|
||||||
|
ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
|
||||||
|
return &ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
|
||||||
|
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
|
||||||
|
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !localOnly {
|
||||||
|
c.f.send(
|
||||||
|
header.CloseTunnel,
|
||||||
|
0,
|
||||||
|
hostInfo.ConnectionState,
|
||||||
|
hostInfo,
|
||||||
|
hostInfo.remote,
|
||||||
|
[]byte{},
|
||||||
|
make([]byte, 12, 12),
|
||||||
|
make([]byte, mtu),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.f.closeTunnel(hostInfo, false)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down, optionally you can avoid shutting down lighthouse tunnels
|
||||||
|
// the int returned is a count of tunnels closed
|
||||||
|
func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
|
||||||
|
//TODO: this is probably better as a function in ConnectionManager or HostMap directly
|
||||||
|
c.f.hostMap.Lock()
|
||||||
|
for _, h := range c.f.hostMap.Hosts {
|
||||||
|
if excludeLighthouses {
|
||||||
|
if _, ok := c.f.lightHouse.lighthouses[h.vpnIp]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if h.ConnectionState.ready {
|
||||||
|
c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, h.remote, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
|
||||||
|
c.f.closeTunnel(h, true)
|
||||||
|
|
||||||
|
c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote).
|
||||||
|
Debug("Sending close tunnel message")
|
||||||
|
closed++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.f.hostMap.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
|
||||||
|
chi := ControlHostInfo{
|
||||||
|
VpnIp: h.vpnIp.ToIP(),
|
||||||
|
LocalIndex: h.localIndexId,
|
||||||
|
RemoteIndex: h.remoteIndexId,
|
||||||
|
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
|
||||||
|
CachedPackets: len(h.packetStore),
|
||||||
|
}
|
||||||
|
|
||||||
|
if h.ConnectionState != nil {
|
||||||
|
chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c := h.GetCert(); c != nil {
|
||||||
|
chi.Cert = c.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
if h.remote != nil {
|
||||||
|
chi.CurrentRemote = h.remote.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
return chi
|
||||||
|
}
|
||||||
|
|
||||||
|
func listHostMap(hm *HostMap) []ControlHostInfo {
|
||||||
|
hm.RLock()
|
||||||
|
hosts := make([]ControlHostInfo, len(hm.Hosts))
|
||||||
|
i := 0
|
||||||
|
for _, v := range hm.Hosts {
|
||||||
|
hosts[i] = copyHostInfo(v, hm.preferredRanges)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
hm.RUnlock()
|
||||||
|
|
||||||
|
return hosts
|
||||||
|
}
|
||||||
113
control_test.go
Normal file
113
control_test.go
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
|
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
|
||||||
|
// To properly ensure we are not exposing core memory to the caller
|
||||||
|
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
|
||||||
|
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
|
||||||
|
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
|
||||||
|
ipNet := net.IPNet{
|
||||||
|
IP: net.IPv4(1, 2, 3, 4),
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
}
|
||||||
|
|
||||||
|
ipNet2 := net.IPNet{
|
||||||
|
IP: net.ParseIP("1:2:3:4:5:6:7:8"),
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
}
|
||||||
|
|
||||||
|
crt := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "test",
|
||||||
|
Ips: []*net.IPNet{&ipNet},
|
||||||
|
Subnets: []*net.IPNet{},
|
||||||
|
Groups: []string{"default-group"},
|
||||||
|
NotBefore: time.Unix(1, 0),
|
||||||
|
NotAfter: time.Unix(2, 0),
|
||||||
|
PublicKey: []byte{5, 6, 7, 8},
|
||||||
|
IsCA: false,
|
||||||
|
Issuer: "the-issuer",
|
||||||
|
InvertedGroups: map[string]struct{}{"default-group": {}},
|
||||||
|
},
|
||||||
|
Signature: []byte{1, 2, 1, 2, 1, 3},
|
||||||
|
}
|
||||||
|
|
||||||
|
remotes := NewRemoteList()
|
||||||
|
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
|
||||||
|
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
|
||||||
|
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
|
||||||
|
remote: remote1,
|
||||||
|
remotes: remotes,
|
||||||
|
ConnectionState: &ConnectionState{
|
||||||
|
peerCert: crt,
|
||||||
|
},
|
||||||
|
remoteIndexId: 200,
|
||||||
|
localIndexId: 201,
|
||||||
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
|
})
|
||||||
|
|
||||||
|
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
|
||||||
|
remote: remote1,
|
||||||
|
remotes: remotes,
|
||||||
|
ConnectionState: &ConnectionState{
|
||||||
|
peerCert: nil,
|
||||||
|
},
|
||||||
|
remoteIndexId: 200,
|
||||||
|
localIndexId: 201,
|
||||||
|
vpnIp: iputil.Ip2VpnIp(ipNet2.IP),
|
||||||
|
})
|
||||||
|
|
||||||
|
c := Control{
|
||||||
|
f: &Interface{
|
||||||
|
hostMap: hm,
|
||||||
|
},
|
||||||
|
l: logrus.New(),
|
||||||
|
}
|
||||||
|
|
||||||
|
thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false)
|
||||||
|
|
||||||
|
expectedInfo := ControlHostInfo{
|
||||||
|
VpnIp: net.IPv4(1, 2, 3, 4).To4(),
|
||||||
|
LocalIndex: 201,
|
||||||
|
RemoteIndex: 200,
|
||||||
|
RemoteAddrs: []*udp.Addr{remote2, remote1},
|
||||||
|
CachedPackets: 0,
|
||||||
|
Cert: crt.Copy(),
|
||||||
|
MessageCounter: 0,
|
||||||
|
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure we don't have any unexpected fields
|
||||||
|
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote"}, thi)
|
||||||
|
util.AssertDeepCopyEqual(t, &expectedInfo, thi)
|
||||||
|
|
||||||
|
// Make sure we don't panic if the host info doesn't have a cert yet
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertFields(t *testing.T, expected []string, actualStruct interface{}) {
|
||||||
|
val := reflect.ValueOf(actualStruct).Elem()
|
||||||
|
fields := make([]string, val.NumField())
|
||||||
|
for i := 0; i < val.NumField(); i++ {
|
||||||
|
fields[i] = val.Type().Field(i).Name
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, expected, fields)
|
||||||
|
}
|
||||||
132
control_tester.go
Normal file
132
control_tester.go
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/google/gopacket"
|
||||||
|
"github.com/google/gopacket/layers"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WaitForTypeByIndex will pipe all messages from this control device into the pipeTo control device
|
||||||
|
// returning after a message matching the criteria has been piped
|
||||||
|
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
||||||
|
h := &header.H{}
|
||||||
|
for {
|
||||||
|
p := c.f.outside.Get(true)
|
||||||
|
if err := h.Parse(p.Data); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
pipeTo.InjectUDPPacket(p)
|
||||||
|
if h.Type == msgType && h.Subtype == subType {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForTypeByIndex is similar to WaitForType except it adds an index check
|
||||||
|
// Useful if you have many nodes communicating and want to wait to find a specific nodes packet
|
||||||
|
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
|
||||||
|
h := &header.H{}
|
||||||
|
for {
|
||||||
|
p := c.f.outside.Get(true)
|
||||||
|
if err := h.Parse(p.Data); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
pipeTo.InjectUDPPacket(p)
|
||||||
|
if h.RemoteIndex == toIndex && h.Type == msgType && h.Subtype == subType {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
|
||||||
|
// This is necessary if you did not configure static hosts or are not running a lighthouse
|
||||||
|
func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) {
|
||||||
|
c.f.lightHouse.Lock()
|
||||||
|
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
|
||||||
|
remoteList.Lock()
|
||||||
|
defer remoteList.Unlock()
|
||||||
|
c.f.lightHouse.Unlock()
|
||||||
|
|
||||||
|
iVpnIp := iputil.Ip2VpnIp(vpnIp)
|
||||||
|
if v4 := toAddr.IP.To4(); v4 != nil {
|
||||||
|
remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
|
||||||
|
} else {
|
||||||
|
remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFromTun will pull a packet off the tun side of nebula
|
||||||
|
func (c *Control) GetFromTun(block bool) []byte {
|
||||||
|
return c.f.inside.(*Tun).Get(block)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFromUDP will pull a udp packet off the udp side of nebula
|
||||||
|
func (c *Control) GetFromUDP(block bool) *udp.Packet {
|
||||||
|
return c.f.outside.Get(block)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
|
||||||
|
return c.f.outside.TxPackets
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetTunTxChan() <-chan []byte {
|
||||||
|
return c.f.inside.(*Tun).txPackets
|
||||||
|
}
|
||||||
|
|
||||||
|
// InjectUDPPacket will inject a packet into the udp side of nebula
|
||||||
|
func (c *Control) InjectUDPPacket(p *udp.Packet) {
|
||||||
|
c.f.outside.Send(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
|
||||||
|
func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) {
|
||||||
|
ip := layers.IPv4{
|
||||||
|
Version: 4,
|
||||||
|
TTL: 64,
|
||||||
|
Protocol: layers.IPProtocolUDP,
|
||||||
|
SrcIP: c.f.inside.CidrNet().IP,
|
||||||
|
DstIP: toIp,
|
||||||
|
}
|
||||||
|
|
||||||
|
udp := layers.UDP{
|
||||||
|
SrcPort: layers.UDPPort(fromPort),
|
||||||
|
DstPort: layers.UDPPort(toPort),
|
||||||
|
}
|
||||||
|
err := udp.SetNetworkLayerForChecksum(&ip)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer := gopacket.NewSerializeBuffer()
|
||||||
|
opt := gopacket.SerializeOptions{
|
||||||
|
ComputeChecksums: true,
|
||||||
|
FixLengths: true,
|
||||||
|
}
|
||||||
|
err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload(data))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.f.inside.(*Tun).Send(buffer.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) GetUDPAddr() string {
|
||||||
|
return c.f.outside.Addr.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
|
||||||
|
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||||
|
return true
|
||||||
|
}
|
||||||
6
dist/arch/nebula.service
vendored
6
dist/arch/nebula.service
vendored
@@ -1,12 +1,10 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=nebula
|
Description=nebula
|
||||||
Wants=basic.target
|
Wants=basic.target network-online.target
|
||||||
After=basic.target network.target
|
After=basic.target network.target network-online.target
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
SyslogIdentifier=nebula
|
SyslogIdentifier=nebula
|
||||||
StandardOutput=syslog
|
|
||||||
StandardError=syslog
|
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
||||||
Restart=always
|
Restart=always
|
||||||
|
|||||||
15
dist/fedora/nebula.service
vendored
Normal file
15
dist/fedora/nebula.service
vendored
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Nebula overlay networking tool
|
||||||
|
|
||||||
|
After=basic.target network.target network-online.target
|
||||||
|
Before=sshd.service
|
||||||
|
Wants=basic.target network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
|
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
||||||
|
Restart=always
|
||||||
|
SyslogIdentifier=nebula
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
@@ -7,6 +7,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This whole thing should be rewritten to use context
|
// This whole thing should be rewritten to use context
|
||||||
@@ -43,8 +46,8 @@ func (d *dnsRecords) QueryCert(data string) string {
|
|||||||
if ip == nil {
|
if ip == nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
iip := ip2int(ip)
|
iip := iputil.Ip2VpnIp(ip)
|
||||||
hostinfo, err := d.hostMap.QueryVpnIP(iip)
|
hostinfo, err := d.hostMap.QueryVpnIp(iip)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
@@ -63,7 +66,7 @@ func (d *dnsRecords) Add(host, data string) {
|
|||||||
d.Unlock()
|
d.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseQuery(m *dns.Msg, w dns.ResponseWriter) {
|
func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
|
||||||
for _, q := range m.Question {
|
for _, q := range m.Question {
|
||||||
switch q.Qtype {
|
switch q.Qtype {
|
||||||
case dns.TypeA:
|
case dns.TypeA:
|
||||||
@@ -95,37 +98,44 @@ func parseQuery(m *dns.Msg, w dns.ResponseWriter) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) {
|
func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
|
||||||
m := new(dns.Msg)
|
m := new(dns.Msg)
|
||||||
m.SetReply(r)
|
m.SetReply(r)
|
||||||
m.Compress = false
|
m.Compress = false
|
||||||
|
|
||||||
switch r.Opcode {
|
switch r.Opcode {
|
||||||
case dns.OpcodeQuery:
|
case dns.OpcodeQuery:
|
||||||
parseQuery(m, w)
|
parseQuery(l, m, w)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteMsg(m)
|
w.WriteMsg(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
func dnsMain(hostMap *HostMap, c *Config) {
|
func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
|
||||||
dnsR = newDnsRecords(hostMap)
|
dnsR = newDnsRecords(hostMap)
|
||||||
|
|
||||||
// attach request handler func
|
// attach request handler func
|
||||||
dns.HandleFunc(".", handleDnsRequest)
|
dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
|
||||||
|
handleDnsRequest(l, w, r)
|
||||||
|
})
|
||||||
|
|
||||||
c.RegisterReloadCallback(reloadDns)
|
c.RegisterReloadCallback(func(c *config.C) {
|
||||||
startDns(c)
|
reloadDns(l, c)
|
||||||
|
})
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
startDns(l, c)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getDnsServerAddr(c *Config) string {
|
func getDnsServerAddr(c *config.C) string {
|
||||||
return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
|
return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
|
||||||
}
|
}
|
||||||
|
|
||||||
func startDns(c *Config) {
|
func startDns(l *logrus.Logger, c *config.C) {
|
||||||
dnsAddr = getDnsServerAddr(c)
|
dnsAddr = getDnsServerAddr(c)
|
||||||
dnsServer = &dns.Server{Addr: dnsAddr, Net: "udp"}
|
dnsServer = &dns.Server{Addr: dnsAddr, Net: "udp"}
|
||||||
l.Debugf("Starting DNS responder at %s\n", dnsAddr)
|
l.WithField("dnsListener", dnsAddr).Infof("Starting DNS responder")
|
||||||
err := dnsServer.ListenAndServe()
|
err := dnsServer.ListenAndServe()
|
||||||
defer dnsServer.Shutdown()
|
defer dnsServer.Shutdown()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -133,7 +143,7 @@ func startDns(c *Config) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func reloadDns(c *Config) {
|
func reloadDns(l *logrus.Logger, c *config.C) {
|
||||||
if dnsAddr == getDnsServerAddr(c) {
|
if dnsAddr == getDnsServerAddr(c) {
|
||||||
l.Debug("No DNS server config change detected")
|
l.Debug("No DNS server config change detected")
|
||||||
return
|
return
|
||||||
@@ -141,5 +151,5 @@ func reloadDns(c *Config) {
|
|||||||
|
|
||||||
l.Debug("Restarting DNS server")
|
l.Debug("Restarting DNS server")
|
||||||
dnsServer.Shutdown()
|
dnsServer.Shutdown()
|
||||||
go startDns(c)
|
go startDns(l, c)
|
||||||
}
|
}
|
||||||
|
|||||||
3
e2e/doc.go
Normal file
3
e2e/doc.go
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
package e2e
|
||||||
|
|
||||||
|
// This file exists to allow `go fmt` to traverse here on its own. The build tags were keeping it out before
|
||||||
333
e2e/handshakes_test.go
Normal file
333
e2e/handshakes_test.go
Normal file
@@ -0,0 +1,333 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/e2e/router"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGoodHandshake(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
|
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
|
||||||
|
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
|
||||||
|
|
||||||
|
t.Log("Get their stage 1 packet so that we can play with it")
|
||||||
|
stage1Packet := theirControl.GetFromUDP(true)
|
||||||
|
|
||||||
|
t.Log("I consume a garbage packet with a proper nebula header for our tunnel")
|
||||||
|
// this should log a statement and get ignored, allowing the real handshake packet to complete the tunnel
|
||||||
|
badPacket := stage1Packet.Copy()
|
||||||
|
badPacket.Data = badPacket.Data[:len(badPacket.Data)-header.Len]
|
||||||
|
myControl.InjectUDPPacket(badPacket)
|
||||||
|
|
||||||
|
t.Log("Have me consume their real stage 1 packet. I have a tunnel now")
|
||||||
|
myControl.InjectUDPPacket(stage1Packet)
|
||||||
|
|
||||||
|
t.Log("Wait until we see my cached packet come through")
|
||||||
|
myControl.WaitForType(1, 0, theirControl)
|
||||||
|
|
||||||
|
t.Log("Make sure our host infos are correct")
|
||||||
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
||||||
|
|
||||||
|
t.Log("Get that cached packet and make sure it looks right")
|
||||||
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
||||||
|
|
||||||
|
t.Log("Do a bidirectional tunnel test")
|
||||||
|
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, router.NewR(myControl, theirControl))
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
//TODO: assert hostmaps
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWrongResponderHandshake(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
|
||||||
|
// The IPs here are chosen on purpose:
|
||||||
|
// The current remote handling will sort by preference, public, and then lexically.
|
||||||
|
// So we need them to have a higher address than evil (we could apply a preference though)
|
||||||
|
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
|
||||||
|
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
|
||||||
|
evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Add their real udp addr, which should be tried after evil.
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
||||||
|
|
||||||
|
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(myControl, theirControl, evilControl)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
evilControl.Start()
|
||||||
|
|
||||||
|
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
||||||
|
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||||
|
h := &header.H{}
|
||||||
|
err := h.Parse(p.Data)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 {
|
||||||
|
return router.RouteAndExit
|
||||||
|
}
|
||||||
|
|
||||||
|
return router.KeepRouting
|
||||||
|
})
|
||||||
|
|
||||||
|
//TODO: Assert pending hostmap - I should have a correct hostinfo for them now
|
||||||
|
|
||||||
|
t.Log("My cached packet should be received by them")
|
||||||
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
||||||
|
|
||||||
|
t.Log("Test the tunnel with them")
|
||||||
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
||||||
|
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
||||||
|
|
||||||
|
t.Log("Flush all packets from all controllers")
|
||||||
|
r.FlushAll()
|
||||||
|
|
||||||
|
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
||||||
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil")
|
||||||
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil")
|
||||||
|
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
||||||
|
|
||||||
|
//TODO: assert hostmaps for everyone
|
||||||
|
t.Log("Success!")
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_Case1_Stage1Race(t *testing.T) {
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
|
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
|
// Put their info in our lighthouse and vice versa
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr)
|
||||||
|
|
||||||
|
// Build a router so we don't have to reason who gets which packet
|
||||||
|
r := router.NewR(myControl, theirControl)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Trigger a handshake to start on both me and them")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
||||||
|
theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
|
t.Log("Get both stage 1 handshake packets")
|
||||||
|
myHsForThem := myControl.GetFromUDP(true)
|
||||||
|
theirHsForMe := theirControl.GetFromUDP(true)
|
||||||
|
|
||||||
|
t.Log("Now inject both stage 1 handshake packets")
|
||||||
|
myControl.InjectUDPPacket(theirHsForMe)
|
||||||
|
theirControl.InjectUDPPacket(myHsForThem)
|
||||||
|
//TODO: they should win, grab their index for me and make sure I use it in the end.
|
||||||
|
|
||||||
|
t.Log("They should not have a stage 2 (won the race) but I should send one")
|
||||||
|
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
|
||||||
|
|
||||||
|
t.Log("Route for me until I send a message packet to them")
|
||||||
|
myControl.WaitForType(1, 0, theirControl)
|
||||||
|
|
||||||
|
t.Log("My cached packet should be received by them")
|
||||||
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
||||||
|
|
||||||
|
t.Log("Route for them until I send a message packet to me")
|
||||||
|
theirControl.WaitForType(1, 0, myControl)
|
||||||
|
|
||||||
|
t.Log("Their cached packet should be received by me")
|
||||||
|
theirCachedPacket := myControl.GetFromTun(true)
|
||||||
|
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80)
|
||||||
|
|
||||||
|
t.Log("Do a bidirectional tunnel test")
|
||||||
|
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
//TODO: assert hostmaps
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: add a test with many lies
|
||||||
|
|
||||||
|
func TestPSK(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
myPskMode nebula.PskMode
|
||||||
|
theirPskMode nebula.PskMode
|
||||||
|
}{
|
||||||
|
// None and transitional-accepting both ways
|
||||||
|
{
|
||||||
|
name: "none to transitional-accepting",
|
||||||
|
myPskMode: nebula.PskNone,
|
||||||
|
theirPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "transitional-accepting to none",
|
||||||
|
myPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
theirPskMode: nebula.PskNone,
|
||||||
|
},
|
||||||
|
|
||||||
|
// All transitional-accepting
|
||||||
|
{
|
||||||
|
name: "both transitional-accepting",
|
||||||
|
myPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
theirPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
},
|
||||||
|
|
||||||
|
// transitional-accepting and transitional-sending both ways
|
||||||
|
{
|
||||||
|
name: "transitional-accepting to transitional-sending",
|
||||||
|
myPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
theirPskMode: nebula.PskTransitionalSending,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "transitional-sending to transitional-accepting",
|
||||||
|
myPskMode: nebula.PskTransitionalSending,
|
||||||
|
theirPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
},
|
||||||
|
|
||||||
|
// All transitional-sending
|
||||||
|
{
|
||||||
|
name: "transitional-sending to transitional-sending",
|
||||||
|
myPskMode: nebula.PskTransitionalSending,
|
||||||
|
theirPskMode: nebula.PskTransitionalSending,
|
||||||
|
},
|
||||||
|
|
||||||
|
// enforced and transitional-sending both ways
|
||||||
|
{
|
||||||
|
name: "enforced to transitional-sending",
|
||||||
|
myPskMode: nebula.PskEnforced,
|
||||||
|
theirPskMode: nebula.PskTransitionalSending,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "transitional-sending to enforced",
|
||||||
|
myPskMode: nebula.PskTransitionalSending,
|
||||||
|
theirPskMode: nebula.PskEnforced,
|
||||||
|
},
|
||||||
|
|
||||||
|
// All enforced
|
||||||
|
{
|
||||||
|
name: "both enforced",
|
||||||
|
myPskMode: nebula.PskEnforced,
|
||||||
|
theirPskMode: nebula.PskEnforced,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Enforced can technically handshake with a traditional-accepting but it is bad to be in this state
|
||||||
|
{
|
||||||
|
name: "enforced to traditional-accepting",
|
||||||
|
myPskMode: nebula.PskEnforced,
|
||||||
|
theirPskMode: nebula.PskTransitionalAccepting,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
var myPskSettings, theirPskSettings *m
|
||||||
|
|
||||||
|
switch test.myPskMode {
|
||||||
|
case nebula.PskNone:
|
||||||
|
myPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "none"}}}
|
||||||
|
case nebula.PskTransitionalAccepting:
|
||||||
|
myPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "transitional-accepting", "keys": []string{"this is a key"}}}}
|
||||||
|
case nebula.PskTransitionalSending:
|
||||||
|
myPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "transitional-sending", "keys": []string{"this is a key"}}}}
|
||||||
|
case nebula.PskEnforced:
|
||||||
|
myPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "enforced", "keys": []string{"this is a key"}}}}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch test.theirPskMode {
|
||||||
|
case nebula.PskNone:
|
||||||
|
theirPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "none"}}}
|
||||||
|
case nebula.PskTransitionalAccepting:
|
||||||
|
theirPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "transitional-accepting", "keys": []string{"this is a key"}}}}
|
||||||
|
case nebula.PskTransitionalSending:
|
||||||
|
theirPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "transitional-sending", "keys": []string{"this is a key"}}}}
|
||||||
|
case nebula.PskEnforced:
|
||||||
|
theirPskSettings = &m{"handshakes": &m{"psk": &m{"mode": "enforced", "keys": []string{"this is a key"}}}}
|
||||||
|
}
|
||||||
|
|
||||||
|
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
|
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, myPskSettings)
|
||||||
|
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, theirPskSettings)
|
||||||
|
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
|
||||||
|
r := router.NewR(myControl, theirControl)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
t.Log("Route until we see our cached packet flow")
|
||||||
|
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
|
||||||
|
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||||
|
h := &header.H{}
|
||||||
|
err := h.Parse(p.Data)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this is the stage 1 handshake packet and I am configured to send with a psk, my cert name should
|
||||||
|
// not appear. It would likely be more obvious to unmarshal the payload and check but this works fine for now
|
||||||
|
if test.myPskMode == nebula.PskEnforced || test.myPskMode == nebula.PskTransitionalSending {
|
||||||
|
if h.Type == 0 && h.MessageCounter == 1 {
|
||||||
|
assert.NotContains(t, string(p.Data), "test me")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 {
|
||||||
|
return router.RouteAndExit
|
||||||
|
}
|
||||||
|
|
||||||
|
return router.KeepRouting
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Log("My cached packet should be received by them")
|
||||||
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
|
||||||
|
|
||||||
|
t.Log("Test the tunnel with them")
|
||||||
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
||||||
|
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
//TODO: assert hostmaps
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
330
e2e/helpers_test.go
Normal file
330
e2e/helpers_test.go
Normal file
@@ -0,0 +1,330 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/gopacket"
|
||||||
|
"github.com/google/gopacket/layers"
|
||||||
|
"github.com/imdario/mergo"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/e2e/router"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"golang.org/x/crypto/curve25519"
|
||||||
|
"golang.org/x/crypto/ed25519"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type m map[string]interface{}
|
||||||
|
|
||||||
|
// newSimpleServer creates a nebula instance with many assumptions
|
||||||
|
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, customConfig *m) (*nebula.Control, net.IP, *net.UDPAddr) {
|
||||||
|
l := NewTestLogger()
|
||||||
|
|
||||||
|
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
|
||||||
|
copy(vpnIpNet.IP, udpIp)
|
||||||
|
vpnIpNet.IP[1] += 128
|
||||||
|
udpAddr := net.UDPAddr{
|
||||||
|
IP: udpIp,
|
||||||
|
Port: 4242,
|
||||||
|
}
|
||||||
|
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, "test "+name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
|
||||||
|
|
||||||
|
caB, err := caCrt.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mc := m{
|
||||||
|
"pki": m{
|
||||||
|
"ca": string(caB),
|
||||||
|
"cert": string(myPEM),
|
||||||
|
"key": string(myPrivKey),
|
||||||
|
},
|
||||||
|
//"tun": m{"disabled": true},
|
||||||
|
"firewall": m{
|
||||||
|
"outbound": []m{{
|
||||||
|
"proto": "any",
|
||||||
|
"port": "any",
|
||||||
|
"host": "any",
|
||||||
|
}},
|
||||||
|
"inbound": []m{{
|
||||||
|
"proto": "any",
|
||||||
|
"port": "any",
|
||||||
|
"host": "any",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
//"handshakes": m{
|
||||||
|
// "try_interval": "1s",
|
||||||
|
//},
|
||||||
|
"listen": m{
|
||||||
|
"host": udpAddr.IP.String(),
|
||||||
|
"port": udpAddr.Port,
|
||||||
|
},
|
||||||
|
"logging": m{
|
||||||
|
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
|
||||||
|
"level": l.Level.String(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
cb, err := yaml.Marshal(mc)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c := config.NewC(l)
|
||||||
|
c.LoadString(string(cb))
|
||||||
|
|
||||||
|
if customConfig != nil {
|
||||||
|
ccb, err := yaml.Marshal(customConfig)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ccm := map[interface{}]interface{}{}
|
||||||
|
err = yaml.Unmarshal(ccb, &ccm)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = mergo.Merge(&c.Settings, ccm, mergo.WithAppendSlice)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
control, err := nebula.Main(c, false, "e2e-test", l, nil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return control, vpnIpNet.IP, &udpAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTestCaCert will generate a CA cert
|
||||||
|
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
||||||
|
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||||
|
if before.IsZero() {
|
||||||
|
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
}
|
||||||
|
if after.IsZero() {
|
||||||
|
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
nc := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "test ca",
|
||||||
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
|
PublicKey: pub,
|
||||||
|
IsCA: true,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ips) > 0 {
|
||||||
|
nc.Details.Ips = ips
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(subnets) > 0 {
|
||||||
|
nc.Details.Subnets = subnets
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(groups) > 0 {
|
||||||
|
nc.Details.Groups = groups
|
||||||
|
}
|
||||||
|
|
||||||
|
err = nc.Sign(priv)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pem, err := nc.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nc, pub, priv, pem
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTestCert will generate a signed certificate with the provided details.
|
||||||
|
// Expiry times are defaulted if you do not pass them in
|
||||||
|
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
||||||
|
issuer, err := ca.Sha256Sum()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if before.IsZero() {
|
||||||
|
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
if after.IsZero() {
|
||||||
|
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub, rawPriv := x25519Keypair()
|
||||||
|
|
||||||
|
nc := &cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: name,
|
||||||
|
Ips: []*net.IPNet{ip},
|
||||||
|
Subnets: subnets,
|
||||||
|
Groups: groups,
|
||||||
|
NotBefore: time.Unix(before.Unix(), 0),
|
||||||
|
NotAfter: time.Unix(after.Unix(), 0),
|
||||||
|
PublicKey: pub,
|
||||||
|
IsCA: false,
|
||||||
|
Issuer: issuer,
|
||||||
|
InvertedGroups: make(map[string]struct{}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = nc.Sign(key)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pem, err := nc.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
|
||||||
|
}
|
||||||
|
|
||||||
|
func x25519Keypair() ([]byte, []byte) {
|
||||||
|
privkey := make([]byte, 32)
|
||||||
|
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubkey, privkey
|
||||||
|
}
|
||||||
|
|
||||||
|
// doneCb signals that the test body has finished.
type doneCb func()

// deadline fails the test if the returned doneCb is not called within the
// given number of seconds. Call the returned function when the test finishes.
//
// Note: testing.T.Fatal/FailNow must only be called from the goroutine
// running the test, so the watchdog goroutine reports via t.Error, which the
// testing package documents as safe to call from other goroutines.
func deadline(t *testing.T, seconds time.Duration) doneCb {
	timeout := time.After(seconds * time.Second)
	done := make(chan bool)
	go func() {
		select {
		case <-timeout:
			// t.Fatal would be unsafe here; Error marks the test failed
			// without calling FailNow from the wrong goroutine.
			t.Error("Test did not finish in time")
		case <-done:
		}
	}()

	return func() {
		done <- true
	}
}
|
||||||
|
|
||||||
|
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
|
||||||
|
// Send a packet from them to me
|
||||||
|
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
|
||||||
|
bPacket := r.RouteUntilTxTun(controlB, controlA)
|
||||||
|
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
|
||||||
|
|
||||||
|
// And once more from me to them
|
||||||
|
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
|
||||||
|
aPacket := r.RouteUntilTxTun(controlA, controlB)
|
||||||
|
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertHostInfoPair asserts that controlA and controlB each hold a HostInfo
// for the other peer, and that the pair's vpn IPs, current remote udp
// addresses, and local/remote handshake indexes are mutually consistent.
func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
	// Get both host infos
	hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
	assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")

	hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
	assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")

	// Check that both vpn and real addr are correct
	assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
	assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")

	// To16 normalizes both sides so a 4-byte and 16-byte form of the same ip compare equal
	assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
	assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")

	assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
	assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")

	// Check that our indexes match
	assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
	assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index")

	//TODO: Would be nice to assert this memory
	//checkIndexes := func(name string, hm *HostMap, hi *HostInfo) {
	//	hBbyIndex := hmA.Indexes[hBinA.localIndexId]
	//	assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name)
	//	assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name)
	//
	//	//TODO: remote indexes are susceptible to collision
	//	hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId]
	//	assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name)
	//	assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point to the right host info", name)
	//}
	//
	//// Check hostmap indexes too
	//checkIndexes("hmA", hmA, hBinA)
	//checkIndexes("hmB", hmB, hAinB)
}
|
||||||
|
|
||||||
|
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
|
||||||
|
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
|
||||||
|
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
|
||||||
|
assert.NotNil(t, v4, "No ipv4 data found")
|
||||||
|
|
||||||
|
assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
|
||||||
|
assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")
|
||||||
|
|
||||||
|
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
||||||
|
assert.NotNil(t, udp, "No udp data found")
|
||||||
|
|
||||||
|
assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect")
|
||||||
|
assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect")
|
||||||
|
|
||||||
|
data := packet.ApplicationLayer()
|
||||||
|
assert.NotNil(t, data)
|
||||||
|
assert.Equal(t, expected, data.Payload(), "Data was incorrect")
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTestLogger() *logrus.Logger {
|
||||||
|
l := logrus.New()
|
||||||
|
|
||||||
|
v := os.Getenv("TEST_LOGS")
|
||||||
|
if v == "" {
|
||||||
|
l.SetOutput(ioutil.Discard)
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v {
|
||||||
|
case "2":
|
||||||
|
l.SetLevel(logrus.DebugLevel)
|
||||||
|
case "3":
|
||||||
|
l.SetLevel(logrus.TraceLevel)
|
||||||
|
default:
|
||||||
|
l.SetLevel(logrus.InfoLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
3
e2e/router/doc.go
Normal file
3
e2e/router/doc.go
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
package router
|
||||||
|
|
||||||
|
// This file exists to allow `go fmt` to traverse here on its own. The build tags were keeping it out before
|
||||||
323
e2e/router/router.go
Normal file
323
e2e/router/router.go
Normal file
@@ -0,0 +1,323 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package router
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// R is an in-memory packet router for e2e tests. It shuttles udp packets
// between registered nebula.Control instances and can emulate a simple NAT
// for addresses registered via AddRoute.
type R struct {
	// Simple map of the ip:port registered on a control to the control
	// Basically a router, right?
	controls map[string]*nebula.Control

	// A map for inbound packets for a control that doesn't know about this address
	inNat map[string]*nebula.Control

	// A last used map, if an inbound packet hit the inNat map then
	// all return packets should use the same last used inbound address for the outbound sender
	// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver
	outNat map[string]net.UDPAddr

	// All interactions are locked to help serialize behavior
	sync.Mutex
}
|
||||||
|
|
||||||
|
type ExitType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Keeps routing, the function will get called again on the next packet
|
||||||
|
KeepRouting ExitType = 0
|
||||||
|
// Does not route this packet and exits immediately
|
||||||
|
ExitNow ExitType = 1
|
||||||
|
// Routes this packet and exits immediately afterwards
|
||||||
|
RouteAndExit ExitType = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType
|
||||||
|
|
||||||
|
func NewR(controls ...*nebula.Control) *R {
|
||||||
|
r := &R{
|
||||||
|
controls: make(map[string]*nebula.Control),
|
||||||
|
inNat: make(map[string]*nebula.Control),
|
||||||
|
outNat: make(map[string]net.UDPAddr),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range controls {
|
||||||
|
addr := c.GetUDPAddr()
|
||||||
|
if _, ok := r.controls[addr]; ok {
|
||||||
|
panic("Duplicate listen address: " + addr)
|
||||||
|
}
|
||||||
|
r.controls[addr] = c
|
||||||
|
}
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRoute will place the nebula controller at the ip and port specified.
|
||||||
|
// It does not look at the addr attached to the instance.
|
||||||
|
// If a route is used, this will behave like a NAT for the return path.
|
||||||
|
// Rewriting the source ip:port to what was last sent to from the origin
|
||||||
|
func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
|
||||||
|
inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port))
|
||||||
|
if _, ok := r.inNat[inAddr]; ok {
|
||||||
|
panic("Duplicate listen address inNat: " + inAddr)
|
||||||
|
}
|
||||||
|
r.inNat[inAddr] = c
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnceFrom will route a single packet from sender then return
|
||||||
|
// If the router doesn't have the nebula controller for that address, we panic
|
||||||
|
func (r *R) OnceFrom(sender *nebula.Control) {
|
||||||
|
r.RouteExitFunc(sender, func(*udp.Packet, *nebula.Control) ExitType {
|
||||||
|
return RouteAndExit
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// RouteUntilTxTun will route for sender and return when a packet is seen on receivers tun
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []byte {
	tunTx := receiver.GetTunTxChan()
	udpTx := sender.GetUDPTxChan()

	for {
		select {
		// Maybe we already have something on the tun for us
		case b := <-tunTx:
			return b

		// Nope, lets push the sender along
		case p := <-udpTx:
			outAddr := sender.GetUDPAddr()
			// Hold the router lock while consulting/seeding NAT state and delivering
			r.Lock()
			inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
			c := r.getControl(outAddr, inAddr, p)
			if c == nil {
				r.Unlock()
				panic("No control for udp tx")
			}

			c.InjectUDPPacket(p)
			r.Unlock()
		}
	}
}
|
||||||
|
|
||||||
|
// RouteExitFunc will call the whatDo func with each udp packet from sender.
// whatDo can return:
//   - ExitNow: the packet will not be routed and this call will return immediately
//   - RouteAndExit: this call will return immediately after routing the last packet from sender
//   - KeepRouting: the packet will be routed and whatDo will be called again on the next packet from sender
func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
	h := &header.H{}
	for {
		p := sender.GetFromUDP(true)
		r.Lock()
		// Every routed packet must carry a parseable nebula header
		if err := h.Parse(p.Data); err != nil {
			panic(err)
		}

		outAddr := sender.GetUDPAddr()
		inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
		receiver := r.getControl(outAddr, inAddr, p)
		if receiver == nil {
			r.Unlock()
			panic("Can't route for host: " + inAddr)
		}

		// The router lock stays held while whatDo runs so NAT state is stable for it
		e := whatDo(p, receiver)
		switch e {
		case ExitNow:
			r.Unlock()
			return

		case RouteAndExit:
			receiver.InjectUDPPacket(p)
			r.Unlock()
			return

		case KeepRouting:
			receiver.InjectUDPPacket(p)

		default:
			panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
		}

		r.Unlock()
	}
}
|
||||||
|
|
||||||
|
// RouteUntilAfterMsgType will route for sender until a message type is seen and sent from sender
|
||||||
|
// If the router doesn't have the nebula controller for that address, we panic
|
||||||
|
func (r *R) RouteUntilAfterMsgType(sender *nebula.Control, msgType header.MessageType, subType header.MessageSubType) {
|
||||||
|
h := &header.H{}
|
||||||
|
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
|
||||||
|
if err := h.Parse(p.Data); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if h.Type == msgType && h.Subtype == subType {
|
||||||
|
return RouteAndExit
|
||||||
|
}
|
||||||
|
|
||||||
|
return KeepRouting
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
|
||||||
|
// finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit`
|
||||||
|
// If the router doesn't have the nebula controller for that address, we panic
|
||||||
|
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) {
|
||||||
|
if finish == KeepRouting {
|
||||||
|
finish = RouteAndExit
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
|
||||||
|
if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) {
|
||||||
|
return finish
|
||||||
|
}
|
||||||
|
|
||||||
|
return KeepRouting
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// RouteForAllExitFunc will route for every registered controller and calls the whatDo func with each udp packet
// whatDo can return:
//   - ExitNow: the packet will not be routed and this call will return immediately
//   - RouteAndExit: this call will return immediately after routing the last packet
//   - KeepRouting: the packet will be routed and whatDo will be called again on the next packet
func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
	// Build one reflect.Select receive case per controller so we can wait on
	// every udp tx channel at once; cm maps a case index back to its control.
	sc := make([]reflect.SelectCase, len(r.controls))
	cm := make([]*nebula.Control, len(r.controls))

	i := 0
	for _, c := range r.controls {
		sc[i] = reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(c.GetUDPTxChan()),
			Send: reflect.Value{},
		}

		cm[i] = c
		i++
	}

	for {
		x, rx, _ := reflect.Select(sc)
		r.Lock()

		p := rx.Interface().(*udp.Packet)

		outAddr := cm[x].GetUDPAddr()
		inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
		receiver := r.getControl(outAddr, inAddr, p)
		if receiver == nil {
			r.Unlock()
			panic("Can't route for host: " + inAddr)
		}

		// The router lock stays held while whatDo runs so NAT state is stable for it
		e := whatDo(p, receiver)
		switch e {
		case ExitNow:
			r.Unlock()
			return

		case RouteAndExit:
			receiver.InjectUDPPacket(p)
			r.Unlock()
			return

		case KeepRouting:
			receiver.InjectUDPPacket(p)

		default:
			panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
		}
		r.Unlock()
	}
}
|
||||||
|
|
||||||
|
// FlushAll will route for every registered controller, exiting once there are no packets left to route
func (r *R) FlushAll() {
	// One receive case per controller, same scheme as RouteForAllExitFunc
	sc := make([]reflect.SelectCase, len(r.controls))
	cm := make([]*nebula.Control, len(r.controls))

	i := 0
	for _, c := range r.controls {
		sc[i] = reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(c.GetUDPTxChan()),
			Send: reflect.Value{},
		}

		cm[i] = c
		i++
	}

	// Add a default case to exit when nothing is left to send
	sc = append(sc, reflect.SelectCase{
		Dir:  reflect.SelectDefault,
		Chan: reflect.Value{},
		Send: reflect.Value{},
	})

	for {
		x, rx, ok := reflect.Select(sc)
		// ok is false when the default case fired (nothing ready) or a
		// channel was closed; either way there is nothing left to drain
		if !ok {
			return
		}
		r.Lock()

		p := rx.Interface().(*udp.Packet)

		outAddr := cm[x].GetUDPAddr()
		inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
		receiver := r.getControl(outAddr, inAddr, p)
		if receiver == nil {
			r.Unlock()
			panic("Can't route for host: " + inAddr)
		}
		// NOTE(review): the packet is resolved (possibly seeding NAT state in
		// getControl) but never injected into receiver — it is dropped here.
		// That looks intentional for a drain, but confirm against
		// RouteForAllExitFunc, which does inject.
		r.Unlock()
	}
}
|
||||||
|
|
||||||
|
// getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change
// This is an internal router function, the caller must hold the lock
func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control {
	// Return-path NAT: if this from/to pair was previously seeded below,
	// rewrite the packet's source to the routed (NAT) address
	if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok {
		p.FromIp = newAddr.IP
		p.FromPort = uint16(newAddr.Port)
	}

	c, ok := r.inNat[toAddr]
	if ok {
		sHost, sPort, err := net.SplitHostPort(toAddr)
		if err != nil {
			panic(err)
		}

		port, err := strconv.Atoi(sPort)
		if err != nil {
			panic(err)
		}

		// Seed the return-path entry: replies from this control back to
		// fromAddr should appear to come from the routed (NAT) address
		r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
			IP:   net.ParseIP(sHost),
			Port: port,
		}
		return c
	}

	return r.controls[toAddr]
}
|
||||||
@@ -7,9 +7,11 @@ pki:
|
|||||||
ca: /etc/nebula/ca.crt
|
ca: /etc/nebula/ca.crt
|
||||||
cert: /etc/nebula/host.crt
|
cert: /etc/nebula/host.crt
|
||||||
key: /etc/nebula/host.key
|
key: /etc/nebula/host.key
|
||||||
#blacklist is a list of certificate fingerprints that we will refuse to talk to
|
# blocklist is a list of certificate fingerprints that we will refuse to talk to
|
||||||
#blacklist:
|
#blocklist:
|
||||||
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
|
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
|
||||||
|
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
|
||||||
|
#disconnect_invalid: false
|
||||||
|
|
||||||
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
|
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
|
||||||
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
|
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
|
||||||
@@ -56,6 +58,14 @@ lighthouse:
|
|||||||
#"10.0.0.0/8": false
|
#"10.0.0.0/8": false
|
||||||
#"10.42.42.0/24": true
|
#"10.42.42.0/24": true
|
||||||
|
|
||||||
|
# EXPERIMENTAL: This option may change or disappear in the future.
|
||||||
|
# Optionally allows the definition of remote_allow_list blocks
|
||||||
|
# specific to an inside VPN IP CIDR.
|
||||||
|
#remote_allow_ranges:
|
||||||
|
# This rule would only allow only private IPs for this VPN range
|
||||||
|
#"10.42.42.0/24":
|
||||||
|
#"192.168.0.0/16": true
|
||||||
|
|
||||||
# local_allow_list allows you to filter which local IP addresses we advertise
|
# local_allow_list allows you to filter which local IP addresses we advertise
|
||||||
# to the lighthouses. This uses the same logic as `remote_allow_list`, but
|
# to the lighthouses. This uses the same logic as `remote_allow_list`, but
|
||||||
# additionally, you can specify an `interfaces` map of regular expressions
|
# additionally, you can specify an `interfaces` map of regular expressions
|
||||||
@@ -64,7 +74,7 @@ lighthouse:
|
|||||||
# the inverse). CIDR rules are matched after interface name rules.
|
# the inverse). CIDR rules are matched after interface name rules.
|
||||||
# Default is all local IP addresses.
|
# Default is all local IP addresses.
|
||||||
#local_allow_list:
|
#local_allow_list:
|
||||||
# Example to blacklist tun0 and all docker interfaces.
|
# Example to block tun0 and all docker interfaces.
|
||||||
#interfaces:
|
#interfaces:
|
||||||
#tun0: false
|
#tun0: false
|
||||||
#'docker.*': false
|
#'docker.*': false
|
||||||
@@ -74,6 +84,7 @@ lighthouse:
|
|||||||
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
||||||
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
||||||
listen:
|
listen:
|
||||||
|
# To listen on both any ipv4 and ipv6 use "[::]"
|
||||||
host: 0.0.0.0
|
host: 0.0.0.0
|
||||||
port: 4242
|
port: 4242
|
||||||
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
||||||
@@ -86,6 +97,15 @@ listen:
|
|||||||
#read_buffer: 10485760
|
#read_buffer: 10485760
|
||||||
#write_buffer: 10485760
|
#write_buffer: 10485760
|
||||||
|
|
||||||
|
# EXPERIMENTAL: This option is currently only supported on linux and may
|
||||||
|
# change in future minor releases.
|
||||||
|
#
|
||||||
|
# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
|
||||||
|
# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
|
||||||
|
# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
|
||||||
|
# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
|
||||||
|
#routines: 1
|
||||||
|
|
||||||
punchy:
|
punchy:
|
||||||
# Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
|
# Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
|
||||||
punch: true
|
punch: true
|
||||||
@@ -98,13 +118,15 @@ punchy:
|
|||||||
# delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
|
# delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
|
||||||
#delay: 1s
|
#delay: 1s
|
||||||
|
|
||||||
# Cipher allows you to choose between the available ciphers for your network.
|
# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
|
||||||
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
|
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
|
||||||
#cipher: chachapoly
|
#cipher: chachapoly
|
||||||
|
|
||||||
# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
|
# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
|
||||||
# path to a network adjacent nebula node.
|
# path to a network adjacent nebula node.
|
||||||
#local_range: "172.16.0.0/24"
|
# NOTE: the previous option "local_range" only allowed definition of a single range
|
||||||
|
# and has been deprecated for "preferred_ranges"
|
||||||
|
#preferred_ranges: ["172.16.0.0/24"]
|
||||||
|
|
||||||
# sshd can expose informational and administrative functions via ssh this is a
|
# sshd can expose informational and administrative functions via ssh this is a
|
||||||
#sshd:
|
#sshd:
|
||||||
@@ -124,6 +146,8 @@ punchy:
|
|||||||
|
|
||||||
# Configure the private interface. Note: addr is baked into the nebula certificate
|
# Configure the private interface. Note: addr is baked into the nebula certificate
|
||||||
tun:
|
tun:
|
||||||
|
# When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
|
||||||
|
disabled: false
|
||||||
# Name of the device
|
# Name of the device
|
||||||
dev: nebula1
|
dev: nebula1
|
||||||
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
|
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
|
||||||
@@ -141,10 +165,13 @@ tun:
|
|||||||
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
||||||
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
||||||
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
|
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
|
||||||
|
# `mtu` will default to tun mtu if this option is not specified
|
||||||
|
# `metric` will default to 0 if this option is not specified
|
||||||
unsafe_routes:
|
unsafe_routes:
|
||||||
#- route: 172.16.1.0/24
|
#- route: 172.16.1.0/24
|
||||||
# via: 192.168.100.99
|
# via: 192.168.100.99
|
||||||
# mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
|
# mtu: 1300
|
||||||
|
# metric: 100
|
||||||
|
|
||||||
|
|
||||||
# TODO
|
# TODO
|
||||||
@@ -154,6 +181,8 @@ logging:
|
|||||||
level: info
|
level: info
|
||||||
# json or text formats currently available. Default is text
|
# json or text formats currently available. Default is text
|
||||||
format: text
|
format: text
|
||||||
|
# Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. Default is false
|
||||||
|
#disable_timestamp: true
|
||||||
# timestamp format is specified in Go time format, see:
|
# timestamp format is specified in Go time format, see:
|
||||||
# https://golang.org/pkg/time/#pkg-constants
|
# https://golang.org/pkg/time/#pkg-constants
|
||||||
# default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
|
# default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
|
||||||
@@ -177,19 +206,59 @@ logging:
|
|||||||
#subsystem: nebula
|
#subsystem: nebula
|
||||||
#interval: 10s
|
#interval: 10s
|
||||||
|
|
||||||
|
# enables counter metrics for meta packets
|
||||||
|
# e.g.: `messages.tx.handshake`
|
||||||
|
# NOTE: `message.{tx,rx}.recv_error` is always emitted
|
||||||
|
#message_metrics: false
|
||||||
|
|
||||||
|
# enables detailed counter metrics for lighthouse packets
|
||||||
|
# e.g.: `lighthouse.rx.HostQuery`
|
||||||
|
#lighthouse_metrics: false
|
||||||
|
|
||||||
# Handshake Manager Settings
|
# Handshake Manager Settings
|
||||||
#handshakes:
|
handshakes:
|
||||||
# Total time to try a handshake = sequence of `try_interval * retries`
|
# Handshakes are sent to all known addresses at each interval with a linear backoff,
|
||||||
# With 100ms interval and 20 retries it is 23.5 seconds
|
# Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
|
||||||
|
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
|
||||||
#try_interval: 100ms
|
#try_interval: 100ms
|
||||||
#retries: 20
|
#retries: 20
|
||||||
# wait_rotation is the number of handshake attempts to do before starting to try non-local IP addresses
|
|
||||||
#wait_rotation: 5
|
# trigger_buffer is the size of the buffer channel for quickly sending handshakes
|
||||||
|
# after receiving the response for lighthouse queries
|
||||||
|
#trigger_buffer: 64
|
||||||
|
|
||||||
|
# psk can be used to mask the contents of handshakes and makes handshaking with unintended recipients more difficult
|
||||||
|
# all settings respond to a reload
|
||||||
|
psk:
|
||||||
|
# mode defines how the pre shared keys can be used in a handshake
|
||||||
|
# `none` (the default) does not send or receive using a psk. Ideally `enforced` is used
|
||||||
|
# `transitional-accepting` will send handshakes without using a psk and can receive handshakes using a psk we know about
|
||||||
|
# `transitional-sending` will send handshakes using a psk but will still accept handshakes without them
|
||||||
|
# `enforced` enforces the use of a psk for all tunnels. Any node not also using `enforced` or `transitional-sending` can not handshake with us
|
||||||
|
#
|
||||||
|
# When moving from `none` to `enforced` you will want to change every node in the mesh to `transitional-accepting` and reload
|
||||||
|
# then move every node to `transitional-sending` then reload, and finally `enforced` then reload. This allows you to
|
||||||
|
# avoid stopping the world to use psk. You must ensure at `transitional-accepting` that all nodes have the same psks.
|
||||||
|
#mode: none
|
||||||
|
|
||||||
|
# In `transitional-accepting`, `transitional-sending` and `enforced` modes, the keys provided here are sent through
|
||||||
|
# hkdf with the intended recipients ip used in the info section. This helps guard against handshaking with the wrong
|
||||||
|
# host if your static_host_map or lighthouse(s) has incorrect information.
|
||||||
|
#
|
||||||
|
# Setting keys if mode is `none` has no effect.
|
||||||
|
#
|
||||||
|
# Only the first key is used for outbound handshakes but all keys provided will be tried in the order specified, on
|
||||||
|
# incoming handshakes. This is to allow for psk rotation.
|
||||||
|
#keys:
|
||||||
|
# - shared secret string, this one is used in all outbound handshakes
|
||||||
|
# - this is a fallback key, received handshakes can use this
|
||||||
|
# - another fallback, received handshakes can use this one too
|
||||||
|
# - "\x68\x65\x6c\x6c\x6f\x20\x66\x72\x69\x65\x6e\x64\x73" # for raw bytes if you desire
|
||||||
|
|
||||||
# Nebula security group configuration
|
# Nebula security group configuration
|
||||||
firewall:
|
firewall:
|
||||||
conntrack:
|
conntrack:
|
||||||
tcp_timeout: 120h
|
tcp_timeout: 12m
|
||||||
udp_timeout: 3m
|
udp_timeout: 3m
|
||||||
default_timeout: 10m
|
default_timeout: 10m
|
||||||
max_connections: 100000
|
max_connections: 100000
|
||||||
|
|||||||
@@ -5,8 +5,6 @@ After=basic.target network.target
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
SyslogIdentifier=nebula
|
SyslogIdentifier=nebula
|
||||||
StandardOutput=syslog
|
|
||||||
StandardError=syslog
|
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
|
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
|
||||||
Restart=always
|
Restart=always
|
||||||
|
|||||||
@@ -2,11 +2,10 @@
|
|||||||
Description=nebula
|
Description=nebula
|
||||||
Wants=basic.target
|
Wants=basic.target
|
||||||
After=basic.target network.target
|
After=basic.target network.target
|
||||||
|
Before=sshd.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
SyslogIdentifier=nebula
|
SyslogIdentifier=nebula
|
||||||
StandardOutput=syslog
|
|
||||||
StandardError=syslog
|
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
|
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
|
||||||
Restart=always
|
Restart=always
|
||||||
|
|||||||
333
firewall.go
333
firewall.go
@@ -1,32 +1,24 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
)
|
"github.com/slackhq/nebula/cidr"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
const (
|
"github.com/slackhq/nebula/firewall"
|
||||||
fwProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
|
|
||||||
fwProtoTCP = 6
|
|
||||||
fwProtoUDP = 17
|
|
||||||
fwProtoICMP = 1
|
|
||||||
|
|
||||||
fwPortAny = 0 // Special value for matching `port: any`
|
|
||||||
fwPortFragment = -1 // Special value for matching `port: fragment`
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const tcpACK = 0x10
|
const tcpACK = 0x10
|
||||||
@@ -38,13 +30,19 @@ type FirewallInterface interface {
|
|||||||
|
|
||||||
type conn struct {
|
type conn struct {
|
||||||
Expires time.Time // Time when this conntrack entry will expire
|
Expires time.Time // Time when this conntrack entry will expire
|
||||||
Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack
|
|
||||||
Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
|
Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
|
||||||
|
Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack
|
||||||
|
|
||||||
|
// record why the original connection passed the firewall, so we can re-validate
|
||||||
|
// after ruleset changes. Note, rulesVersion is a uint16 so that these two
|
||||||
|
// fields pack for free after the uint32 above
|
||||||
|
incoming bool
|
||||||
|
rulesVersion uint16
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: need conntrack max tracked connections handling
|
// TODO: need conntrack max tracked connections handling
|
||||||
type Firewall struct {
|
type Firewall struct {
|
||||||
Conns map[FirewallPacket]*conn
|
Conntrack *FirewallConntrack
|
||||||
|
|
||||||
InRules *FirewallTable
|
InRules *FirewallTable
|
||||||
OutRules *FirewallTable
|
OutRules *FirewallTable
|
||||||
@@ -55,16 +53,31 @@ type Firewall struct {
|
|||||||
UDPTimeout time.Duration //linux: 180s max
|
UDPTimeout time.Duration //linux: 180s max
|
||||||
DefaultTimeout time.Duration //linux: 600s
|
DefaultTimeout time.Duration //linux: 600s
|
||||||
|
|
||||||
TimerWheel *TimerWheel
|
|
||||||
|
|
||||||
// Used to ensure we don't emit local packets for ips we don't own
|
// Used to ensure we don't emit local packets for ips we don't own
|
||||||
localIps *CIDRTree
|
localIps *cidr.Tree4
|
||||||
|
|
||||||
connMutex sync.Mutex
|
|
||||||
rules string
|
rules string
|
||||||
|
rulesVersion uint16
|
||||||
|
|
||||||
trackTCPRTT bool
|
trackTCPRTT bool
|
||||||
metricTCPRTT metrics.Histogram
|
metricTCPRTT metrics.Histogram
|
||||||
|
incomingMetrics firewallMetrics
|
||||||
|
outgoingMetrics firewallMetrics
|
||||||
|
|
||||||
|
l *logrus.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
type firewallMetrics struct {
|
||||||
|
droppedLocalIP metrics.Counter
|
||||||
|
droppedRemoteIP metrics.Counter
|
||||||
|
droppedNoRule metrics.Counter
|
||||||
|
}
|
||||||
|
|
||||||
|
type FirewallConntrack struct {
|
||||||
|
sync.Mutex
|
||||||
|
|
||||||
|
Conns map[firewall.Packet]*conn
|
||||||
|
TimerWheel *TimerWheel
|
||||||
}
|
}
|
||||||
|
|
||||||
type FirewallTable struct {
|
type FirewallTable struct {
|
||||||
@@ -94,57 +107,15 @@ type FirewallRule struct {
|
|||||||
Any bool
|
Any bool
|
||||||
Hosts map[string]struct{}
|
Hosts map[string]struct{}
|
||||||
Groups [][]string
|
Groups [][]string
|
||||||
CIDR *CIDRTree
|
CIDR *cidr.Tree4
|
||||||
}
|
}
|
||||||
|
|
||||||
// Even though ports are uint16, int32 maps are faster for lookup
|
// Even though ports are uint16, int32 maps are faster for lookup
|
||||||
// Plus we can use `-1` for fragment rules
|
// Plus we can use `-1` for fragment rules
|
||||||
type firewallPort map[int32]*FirewallCA
|
type firewallPort map[int32]*FirewallCA
|
||||||
|
|
||||||
type FirewallPacket struct {
|
|
||||||
LocalIP uint32
|
|
||||||
RemoteIP uint32
|
|
||||||
LocalPort uint16
|
|
||||||
RemotePort uint16
|
|
||||||
Protocol uint8
|
|
||||||
Fragment bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fp *FirewallPacket) Copy() *FirewallPacket {
|
|
||||||
return &FirewallPacket{
|
|
||||||
LocalIP: fp.LocalIP,
|
|
||||||
RemoteIP: fp.RemoteIP,
|
|
||||||
LocalPort: fp.LocalPort,
|
|
||||||
RemotePort: fp.RemotePort,
|
|
||||||
Protocol: fp.Protocol,
|
|
||||||
Fragment: fp.Fragment,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fp FirewallPacket) MarshalJSON() ([]byte, error) {
|
|
||||||
var proto string
|
|
||||||
switch fp.Protocol {
|
|
||||||
case fwProtoTCP:
|
|
||||||
proto = "tcp"
|
|
||||||
case fwProtoICMP:
|
|
||||||
proto = "icmp"
|
|
||||||
case fwProtoUDP:
|
|
||||||
proto = "udp"
|
|
||||||
default:
|
|
||||||
proto = fmt.Sprintf("unknown %v", fp.Protocol)
|
|
||||||
}
|
|
||||||
return json.Marshal(m{
|
|
||||||
"LocalIP": int2ip(fp.LocalIP).String(),
|
|
||||||
"RemoteIP": int2ip(fp.RemoteIP).String(),
|
|
||||||
"LocalPort": fp.LocalPort,
|
|
||||||
"RemotePort": fp.RemotePort,
|
|
||||||
"Protocol": proto,
|
|
||||||
"Fragment": fp.Fragment,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
|
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
|
||||||
func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
|
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
|
||||||
//TODO: error on 0 duration
|
//TODO: error on 0 duration
|
||||||
var min, max time.Duration
|
var min, max time.Duration
|
||||||
|
|
||||||
@@ -162,7 +133,7 @@ func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.N
|
|||||||
max = defaultTimeout
|
max = defaultTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
localIps := NewCIDRTree()
|
localIps := cidr.NewTree4()
|
||||||
for _, ip := range c.Details.Ips {
|
for _, ip := range c.Details.Ips {
|
||||||
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
|
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
|
||||||
}
|
}
|
||||||
@@ -172,20 +143,35 @@ func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.N
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &Firewall{
|
return &Firewall{
|
||||||
Conns: make(map[FirewallPacket]*conn),
|
Conntrack: &FirewallConntrack{
|
||||||
|
Conns: make(map[firewall.Packet]*conn),
|
||||||
|
TimerWheel: NewTimerWheel(min, max),
|
||||||
|
},
|
||||||
InRules: newFirewallTable(),
|
InRules: newFirewallTable(),
|
||||||
OutRules: newFirewallTable(),
|
OutRules: newFirewallTable(),
|
||||||
TimerWheel: NewTimerWheel(min, max),
|
|
||||||
TCPTimeout: tcpTimeout,
|
TCPTimeout: tcpTimeout,
|
||||||
UDPTimeout: UDPTimeout,
|
UDPTimeout: UDPTimeout,
|
||||||
DefaultTimeout: defaultTimeout,
|
DefaultTimeout: defaultTimeout,
|
||||||
localIps: localIps,
|
localIps: localIps,
|
||||||
|
l: l,
|
||||||
|
|
||||||
metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
||||||
|
incomingMetrics: firewallMetrics{
|
||||||
|
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
|
||||||
|
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
|
||||||
|
droppedNoRule: metrics.GetOrRegisterCounter("firewall.incoming.dropped.no_rule", nil),
|
||||||
|
},
|
||||||
|
outgoingMetrics: firewallMetrics{
|
||||||
|
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.local_ip", nil),
|
||||||
|
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.remote_ip", nil),
|
||||||
|
droppedNoRule: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.no_rule", nil),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, error) {
|
func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *config.C) (*Firewall, error) {
|
||||||
fw := NewFirewall(
|
fw := NewFirewall(
|
||||||
|
l,
|
||||||
c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12),
|
c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12),
|
||||||
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3),
|
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3),
|
||||||
c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10),
|
c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10),
|
||||||
@@ -193,12 +179,12 @@ func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, er
|
|||||||
//TODO: max_connections
|
//TODO: max_connections
|
||||||
)
|
)
|
||||||
|
|
||||||
err := AddFirewallRulesFromConfig(false, c, fw)
|
err := AddFirewallRulesFromConfig(l, false, c, fw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = AddFirewallRulesFromConfig(true, c, fw)
|
err = AddFirewallRulesFromConfig(l, true, c, fw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -208,11 +194,17 @@ func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, er
|
|||||||
|
|
||||||
// AddRule properly creates the in memory rule structure for a firewall table.
|
// AddRule properly creates the in memory rule structure for a firewall table.
|
||||||
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
|
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
|
||||||
|
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
|
||||||
|
// https://github.com/golang/go/issues/14131
|
||||||
|
sIp := ""
|
||||||
|
if ip != nil {
|
||||||
|
sIp = ip.String()
|
||||||
|
}
|
||||||
|
|
||||||
// We need this rule string because we generate a hash. Removing this will break firewall reload.
|
// We need this rule string because we generate a hash. Removing this will break firewall reload.
|
||||||
ruleString := fmt.Sprintf(
|
ruleString := fmt.Sprintf(
|
||||||
"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s",
|
"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s",
|
||||||
incoming, proto, startPort, endPort, groups, host, ip, caName, caSha,
|
incoming, proto, startPort, endPort, groups, host, sIp, caName, caSha,
|
||||||
)
|
)
|
||||||
f.rules += ruleString + "\n"
|
f.rules += ruleString + "\n"
|
||||||
|
|
||||||
@@ -220,7 +212,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
|
|||||||
if !incoming {
|
if !incoming {
|
||||||
direction = "outgoing"
|
direction = "outgoing"
|
||||||
}
|
}
|
||||||
l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": ip, "caName": caName, "caSha": caSha}).
|
f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "caName": caName, "caSha": caSha}).
|
||||||
Info("Firewall rule added")
|
Info("Firewall rule added")
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -235,13 +227,13 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
|
|||||||
}
|
}
|
||||||
|
|
||||||
switch proto {
|
switch proto {
|
||||||
case fwProtoTCP:
|
case firewall.ProtoTCP:
|
||||||
fp = ft.TCP
|
fp = ft.TCP
|
||||||
case fwProtoUDP:
|
case firewall.ProtoUDP:
|
||||||
fp = ft.UDP
|
fp = ft.UDP
|
||||||
case fwProtoICMP:
|
case firewall.ProtoICMP:
|
||||||
fp = ft.ICMP
|
fp = ft.ICMP
|
||||||
case fwProtoAny:
|
case firewall.ProtoAny:
|
||||||
fp = ft.AnyProto
|
fp = ft.AnyProto
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown protocol %v", proto)
|
return fmt.Errorf("unknown protocol %v", proto)
|
||||||
@@ -256,7 +248,7 @@ func (f *Firewall) GetRuleHash() string {
|
|||||||
return hex.EncodeToString(sum[:])
|
return hex.EncodeToString(sum[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterface) error {
|
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
|
||||||
var table string
|
var table string
|
||||||
if inbound {
|
if inbound {
|
||||||
table = "firewall.inbound"
|
table = "firewall.inbound"
|
||||||
@@ -264,7 +256,7 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
|
|||||||
table = "firewall.outbound"
|
table = "firewall.outbound"
|
||||||
}
|
}
|
||||||
|
|
||||||
r := config.Get(table)
|
r := c.Get(table)
|
||||||
if r == nil {
|
if r == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -276,7 +268,7 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
|
|||||||
|
|
||||||
for i, t := range rs {
|
for i, t := range rs {
|
||||||
var groups []string
|
var groups []string
|
||||||
r, err := convertRule(t, table, i)
|
r, err := convertRule(l, t, table, i)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("%s rule #%v; %s", table, i, err)
|
return fmt.Errorf("%s rule #%v; %s", table, i, err)
|
||||||
}
|
}
|
||||||
@@ -319,13 +311,13 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
|
|||||||
var proto uint8
|
var proto uint8
|
||||||
switch r.Proto {
|
switch r.Proto {
|
||||||
case "any":
|
case "any":
|
||||||
proto = fwProtoAny
|
proto = firewall.ProtoAny
|
||||||
case "tcp":
|
case "tcp":
|
||||||
proto = fwProtoTCP
|
proto = firewall.ProtoTCP
|
||||||
case "udp":
|
case "udp":
|
||||||
proto = fwProtoUDP
|
proto = firewall.ProtoUDP
|
||||||
case "icmp":
|
case "icmp":
|
||||||
proto = fwProtoICMP
|
proto = firewall.ProtoICMP
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
|
return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
|
||||||
}
|
}
|
||||||
@@ -347,27 +339,36 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Firewall) Drop(packet []byte, fp FirewallPacket, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool) bool {
|
var ErrInvalidRemoteIP = errors.New("remote IP is not in remote certificate subnets")
|
||||||
|
var ErrInvalidLocalIP = errors.New("local IP is not in list of handled local IPs")
|
||||||
|
var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
|
||||||
|
|
||||||
|
// Drop returns an error if the packet should be dropped, explaining why. It
|
||||||
|
// returns nil if the packet should not be dropped.
|
||||||
|
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
|
||||||
// Check if we spoke to this tuple, if we did then allow this packet
|
// Check if we spoke to this tuple, if we did then allow this packet
|
||||||
if f.inConns(packet, fp, incoming) {
|
if f.inConns(packet, fp, incoming, h, caPool, localCache) {
|
||||||
return false
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure remote address matches nebula certificate
|
// Make sure remote address matches nebula certificate
|
||||||
if remoteCidr := h.remoteCidr; remoteCidr != nil {
|
if remoteCidr := h.remoteCidr; remoteCidr != nil {
|
||||||
if remoteCidr.Contains(fp.RemoteIP) == nil {
|
if remoteCidr.Contains(fp.RemoteIP) == nil {
|
||||||
return true
|
f.metrics(incoming).droppedRemoteIP.Inc(1)
|
||||||
|
return ErrInvalidRemoteIP
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Simple case: Certificate has one IP and no subnets
|
// Simple case: Certificate has one IP and no subnets
|
||||||
if fp.RemoteIP != h.hostId {
|
if fp.RemoteIP != h.vpnIp {
|
||||||
return true
|
f.metrics(incoming).droppedRemoteIP.Inc(1)
|
||||||
|
return ErrInvalidRemoteIP
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure we are supposed to be handling this local ip address
|
// Make sure we are supposed to be handling this local ip address
|
||||||
if f.localIps.Contains(fp.LocalIP) == nil {
|
if f.localIps.Contains(fp.LocalIP) == nil {
|
||||||
return true
|
f.metrics(incoming).droppedLocalIP.Inc(1)
|
||||||
|
return ErrInvalidLocalIP
|
||||||
}
|
}
|
||||||
|
|
||||||
table := f.OutRules
|
table := f.OutRules
|
||||||
@@ -377,13 +378,22 @@ func (f *Firewall) Drop(packet []byte, fp FirewallPacket, incoming bool, h *Host
|
|||||||
|
|
||||||
// We now know which firewall table to check against
|
// We now know which firewall table to check against
|
||||||
if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) {
|
if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) {
|
||||||
return true
|
f.metrics(incoming).droppedNoRule.Inc(1)
|
||||||
|
return ErrNoMatchingRule
|
||||||
}
|
}
|
||||||
|
|
||||||
// We always want to conntrack since it is a faster operation
|
// We always want to conntrack since it is a faster operation
|
||||||
f.addConn(packet, fp, incoming)
|
f.addConn(packet, fp, incoming)
|
||||||
|
|
||||||
return false
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Firewall) metrics(incoming bool) firewallMetrics {
|
||||||
|
if incoming {
|
||||||
|
return f.incomingMetrics
|
||||||
|
} else {
|
||||||
|
return f.outgoingMetrics
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Destroy cleans up any known cyclical references so the object can be free'd my GC. This should be called if a new
|
// Destroy cleans up any known cyclical references so the object can be free'd my GC. This should be called if a new
|
||||||
@@ -393,77 +403,132 @@ func (f *Firewall) Destroy() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *Firewall) EmitStats() {
|
func (f *Firewall) EmitStats() {
|
||||||
conntrackCount := len(f.Conns)
|
conntrack := f.Conntrack
|
||||||
|
conntrack.Lock()
|
||||||
|
conntrackCount := len(conntrack.Conns)
|
||||||
|
conntrack.Unlock()
|
||||||
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
|
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
|
||||||
|
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Firewall) inConns(packet []byte, fp FirewallPacket, incoming bool) bool {
|
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
|
||||||
f.connMutex.Lock()
|
if localCache != nil {
|
||||||
|
if _, ok := localCache[fp]; ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
conntrack := f.Conntrack
|
||||||
|
conntrack.Lock()
|
||||||
|
|
||||||
// Purge every time we test
|
// Purge every time we test
|
||||||
ep, has := f.TimerWheel.Purge()
|
ep, has := conntrack.TimerWheel.Purge()
|
||||||
if has {
|
if has {
|
||||||
f.evict(ep)
|
f.evict(ep)
|
||||||
}
|
}
|
||||||
|
|
||||||
c, ok := f.Conns[fp]
|
c, ok := conntrack.Conns[fp]
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
f.connMutex.Unlock()
|
conntrack.Unlock()
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.rulesVersion != f.rulesVersion {
|
||||||
|
// This conntrack entry was for an older rule set, validate
|
||||||
|
// it still passes with the current rule set
|
||||||
|
table := f.OutRules
|
||||||
|
if c.incoming {
|
||||||
|
table = f.InRules
|
||||||
|
}
|
||||||
|
|
||||||
|
// We now know which firewall table to check against
|
||||||
|
if !table.match(fp, c.incoming, h.ConnectionState.peerCert, caPool) {
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
h.logger(f.l).
|
||||||
|
WithField("fwPacket", fp).
|
||||||
|
WithField("incoming", c.incoming).
|
||||||
|
WithField("rulesVersion", f.rulesVersion).
|
||||||
|
WithField("oldRulesVersion", c.rulesVersion).
|
||||||
|
Debugln("dropping old conntrack entry, does not match new ruleset")
|
||||||
|
}
|
||||||
|
delete(conntrack.Conns, fp)
|
||||||
|
conntrack.Unlock()
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
h.logger(f.l).
|
||||||
|
WithField("fwPacket", fp).
|
||||||
|
WithField("incoming", c.incoming).
|
||||||
|
WithField("rulesVersion", f.rulesVersion).
|
||||||
|
WithField("oldRulesVersion", c.rulesVersion).
|
||||||
|
Debugln("keeping old conntrack entry, does match new ruleset")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.rulesVersion = f.rulesVersion
|
||||||
|
}
|
||||||
|
|
||||||
switch fp.Protocol {
|
switch fp.Protocol {
|
||||||
case fwProtoTCP:
|
case firewall.ProtoTCP:
|
||||||
c.Expires = time.Now().Add(f.TCPTimeout)
|
c.Expires = time.Now().Add(f.TCPTimeout)
|
||||||
if incoming {
|
if incoming {
|
||||||
f.checkTCPRTT(c, packet)
|
f.checkTCPRTT(c, packet)
|
||||||
} else {
|
} else {
|
||||||
setTCPRTTTracking(c, packet)
|
setTCPRTTTracking(c, packet)
|
||||||
}
|
}
|
||||||
case fwProtoUDP:
|
case firewall.ProtoUDP:
|
||||||
c.Expires = time.Now().Add(f.UDPTimeout)
|
c.Expires = time.Now().Add(f.UDPTimeout)
|
||||||
default:
|
default:
|
||||||
c.Expires = time.Now().Add(f.DefaultTimeout)
|
c.Expires = time.Now().Add(f.DefaultTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.connMutex.Unlock()
|
conntrack.Unlock()
|
||||||
|
|
||||||
|
if localCache != nil {
|
||||||
|
localCache[fp] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Firewall) addConn(packet []byte, fp FirewallPacket, incoming bool) {
|
func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
|
||||||
var timeout time.Duration
|
var timeout time.Duration
|
||||||
c := &conn{}
|
c := &conn{}
|
||||||
|
|
||||||
switch fp.Protocol {
|
switch fp.Protocol {
|
||||||
case fwProtoTCP:
|
case firewall.ProtoTCP:
|
||||||
timeout = f.TCPTimeout
|
timeout = f.TCPTimeout
|
||||||
if !incoming {
|
if !incoming {
|
||||||
setTCPRTTTracking(c, packet)
|
setTCPRTTTracking(c, packet)
|
||||||
}
|
}
|
||||||
case fwProtoUDP:
|
case firewall.ProtoUDP:
|
||||||
timeout = f.UDPTimeout
|
timeout = f.UDPTimeout
|
||||||
default:
|
default:
|
||||||
timeout = f.DefaultTimeout
|
timeout = f.DefaultTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
f.connMutex.Lock()
|
conntrack := f.Conntrack
|
||||||
if _, ok := f.Conns[fp]; !ok {
|
conntrack.Lock()
|
||||||
f.TimerWheel.Add(fp, timeout)
|
if _, ok := conntrack.Conns[fp]; !ok {
|
||||||
|
conntrack.TimerWheel.Add(fp, timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Record which rulesVersion allowed this connection, so we can retest after
|
||||||
|
// firewall reload
|
||||||
|
c.incoming = incoming
|
||||||
|
c.rulesVersion = f.rulesVersion
|
||||||
c.Expires = time.Now().Add(timeout)
|
c.Expires = time.Now().Add(timeout)
|
||||||
f.Conns[fp] = c
|
conntrack.Conns[fp] = c
|
||||||
f.connMutex.Unlock()
|
conntrack.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
|
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
|
||||||
// Caller must own the connMutex lock!
|
// Caller must own the connMutex lock!
|
||||||
func (f *Firewall) evict(p FirewallPacket) {
|
func (f *Firewall) evict(p firewall.Packet) {
|
||||||
//TODO: report a stat if the tcp rtt tracking was never resolved?
|
//TODO: report a stat if the tcp rtt tracking was never resolved?
|
||||||
// Are we still tracking this conn?
|
// Are we still tracking this conn?
|
||||||
t, ok := f.Conns[p]
|
conntrack := f.Conntrack
|
||||||
|
t, ok := conntrack.Conns[p]
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -472,29 +537,29 @@ func (f *Firewall) evict(p FirewallPacket) {
|
|||||||
|
|
||||||
// Timeout is in the future, re-add the timer
|
// Timeout is in the future, re-add the timer
|
||||||
if newT > 0 {
|
if newT > 0 {
|
||||||
f.TimerWheel.Add(p, newT)
|
conntrack.TimerWheel.Add(p, newT)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// This conn is done
|
// This conn is done
|
||||||
delete(f.Conns, p)
|
delete(conntrack.Conns, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ft *FirewallTable) match(p FirewallPacket, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
||||||
if ft.AnyProto.match(p, incoming, c, caPool) {
|
if ft.AnyProto.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
switch p.Protocol {
|
switch p.Protocol {
|
||||||
case fwProtoTCP:
|
case firewall.ProtoTCP:
|
||||||
if ft.TCP.match(p, incoming, c, caPool) {
|
if ft.TCP.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
case fwProtoUDP:
|
case firewall.ProtoUDP:
|
||||||
if ft.UDP.match(p, incoming, c, caPool) {
|
if ft.UDP.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
case fwProtoICMP:
|
case firewall.ProtoICMP:
|
||||||
if ft.ICMP.match(p, incoming, c, caPool) {
|
if ft.ICMP.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -524,7 +589,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
||||||
// We don't have any allowed ports, bail
|
// We don't have any allowed ports, bail
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
return false
|
return false
|
||||||
@@ -533,7 +598,7 @@ func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCert
|
|||||||
var port int32
|
var port int32
|
||||||
|
|
||||||
if p.Fragment {
|
if p.Fragment {
|
||||||
port = fwPortFragment
|
port = firewall.PortFragment
|
||||||
} else if incoming {
|
} else if incoming {
|
||||||
port = int32(p.LocalPort)
|
port = int32(p.LocalPort)
|
||||||
} else {
|
} else {
|
||||||
@@ -544,7 +609,7 @@ func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCert
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
return fp[fwPortAny].match(p, c, caPool)
|
return fp[firewall.PortAny].match(p, c, caPool)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error {
|
func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error {
|
||||||
@@ -552,7 +617,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
|
|||||||
return &FirewallRule{
|
return &FirewallRule{
|
||||||
Hosts: make(map[string]struct{}),
|
Hosts: make(map[string]struct{}),
|
||||||
Groups: make([][]string, 0),
|
Groups: make([][]string, 0),
|
||||||
CIDR: NewCIDRTree(),
|
CIDR: cidr.NewTree4(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -587,7 +652,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fc *FirewallCA) match(p FirewallPacket, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
||||||
if fc == nil {
|
if fc == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -620,7 +685,7 @@ func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) err
|
|||||||
// If it's any we need to wipe out any pre-existing rules to save on memory
|
// If it's any we need to wipe out any pre-existing rules to save on memory
|
||||||
fr.Groups = make([][]string, 0)
|
fr.Groups = make([][]string, 0)
|
||||||
fr.Hosts = make(map[string]struct{})
|
fr.Hosts = make(map[string]struct{})
|
||||||
fr.CIDR = NewCIDRTree()
|
fr.CIDR = cidr.NewTree4()
|
||||||
} else {
|
} else {
|
||||||
if len(groups) > 0 {
|
if len(groups) > 0 {
|
||||||
fr.Groups = append(fr.Groups, groups)
|
fr.Groups = append(fr.Groups, groups)
|
||||||
@@ -660,7 +725,7 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) match(p FirewallPacket, c *cert.NebulaCertificate) bool {
|
func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
|
||||||
if fr == nil {
|
if fr == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -714,7 +779,7 @@ type rule struct {
|
|||||||
CASha string
|
CASha string
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertRule(p interface{}, table string, i int) (rule, error) {
|
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
||||||
r := rule{}
|
r := rule{}
|
||||||
|
|
||||||
m, ok := p.(map[interface{}]interface{})
|
m, ok := p.(map[interface{}]interface{})
|
||||||
@@ -769,12 +834,12 @@ func convertRule(p interface{}, table string, i int) (rule, error) {
|
|||||||
|
|
||||||
func parsePort(s string) (startPort, endPort int32, err error) {
|
func parsePort(s string) (startPort, endPort int32, err error) {
|
||||||
if s == "any" {
|
if s == "any" {
|
||||||
startPort = fwPortAny
|
startPort = firewall.PortAny
|
||||||
endPort = fwPortAny
|
endPort = firewall.PortAny
|
||||||
|
|
||||||
} else if s == "fragment" {
|
} else if s == "fragment" {
|
||||||
startPort = fwPortFragment
|
startPort = firewall.PortFragment
|
||||||
endPort = fwPortFragment
|
endPort = firewall.PortFragment
|
||||||
|
|
||||||
} else if strings.Contains(s, `-`) {
|
} else if strings.Contains(s, `-`) {
|
||||||
sPorts := strings.SplitN(s, `-`, 2)
|
sPorts := strings.SplitN(s, `-`, 2)
|
||||||
@@ -798,8 +863,8 @@ func parsePort(s string) (startPort, endPort int32, err error) {
|
|||||||
startPort = int32(rStartPort)
|
startPort = int32(rStartPort)
|
||||||
endPort = int32(rEndPort)
|
endPort = int32(rEndPort)
|
||||||
|
|
||||||
if startPort == fwPortAny {
|
if startPort == firewall.PortAny {
|
||||||
endPort = fwPortAny
|
endPort = firewall.PortAny
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
59
firewall/cache.go
Normal file
59
firewall/cache.go
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
package firewall
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConntrackCache is used as a local routine cache to know if a given flow
|
||||||
|
// has been seen in the conntrack table.
|
||||||
|
type ConntrackCache map[Packet]struct{}
|
||||||
|
|
||||||
|
type ConntrackCacheTicker struct {
|
||||||
|
cacheV uint64
|
||||||
|
cacheTick uint64
|
||||||
|
|
||||||
|
cache ConntrackCache
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
|
||||||
|
if d == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &ConntrackCacheTicker{
|
||||||
|
cache: ConntrackCache{},
|
||||||
|
}
|
||||||
|
|
||||||
|
go c.tick(d)
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ConntrackCacheTicker) tick(d time.Duration) {
|
||||||
|
for {
|
||||||
|
time.Sleep(d)
|
||||||
|
atomic.AddUint64(&c.cacheTick, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get checks if the cache ticker has moved to the next version before returning
|
||||||
|
// the map. If it has moved, we reset the map.
|
||||||
|
func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
|
||||||
|
if c == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if tick := atomic.LoadUint64(&c.cacheTick); tick != c.cacheV {
|
||||||
|
c.cacheV = tick
|
||||||
|
if ll := len(c.cache); ll > 0 {
|
||||||
|
if l.Level == logrus.DebugLevel {
|
||||||
|
l.WithField("len", ll).Debug("resetting conntrack cache")
|
||||||
|
}
|
||||||
|
c.cache = make(ConntrackCache, ll)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.cache
|
||||||
|
}
|
||||||
62
firewall/packet.go
Normal file
62
firewall/packet.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package firewall
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
type m map[string]interface{}
|
||||||
|
|
||||||
|
const (
|
||||||
|
ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
|
||||||
|
ProtoTCP = 6
|
||||||
|
ProtoUDP = 17
|
||||||
|
ProtoICMP = 1
|
||||||
|
|
||||||
|
PortAny = 0 // Special value for matching `port: any`
|
||||||
|
PortFragment = -1 // Special value for matching `port: fragment`
|
||||||
|
)
|
||||||
|
|
||||||
|
type Packet struct {
|
||||||
|
LocalIP iputil.VpnIp
|
||||||
|
RemoteIP iputil.VpnIp
|
||||||
|
LocalPort uint16
|
||||||
|
RemotePort uint16
|
||||||
|
Protocol uint8
|
||||||
|
Fragment bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fp *Packet) Copy() *Packet {
|
||||||
|
return &Packet{
|
||||||
|
LocalIP: fp.LocalIP,
|
||||||
|
RemoteIP: fp.RemoteIP,
|
||||||
|
LocalPort: fp.LocalPort,
|
||||||
|
RemotePort: fp.RemotePort,
|
||||||
|
Protocol: fp.Protocol,
|
||||||
|
Fragment: fp.Fragment,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fp Packet) MarshalJSON() ([]byte, error) {
|
||||||
|
var proto string
|
||||||
|
switch fp.Protocol {
|
||||||
|
case ProtoTCP:
|
||||||
|
proto = "tcp"
|
||||||
|
case ProtoICMP:
|
||||||
|
proto = "icmp"
|
||||||
|
case ProtoUDP:
|
||||||
|
proto = "udp"
|
||||||
|
default:
|
||||||
|
proto = fmt.Sprintf("unknown %v", fp.Protocol)
|
||||||
|
}
|
||||||
|
return json.Marshal(m{
|
||||||
|
"LocalIP": fp.LocalIP.String(),
|
||||||
|
"RemoteIP": fp.RemoteIP.String(),
|
||||||
|
"LocalPort": fp.LocalPort,
|
||||||
|
"RemotePort": fp.RemotePort,
|
||||||
|
"Protocol": proto,
|
||||||
|
"Fragment": fp.Fragment,
|
||||||
|
})
|
||||||
|
}
|
||||||
411
firewall_test.go
411
firewall_test.go
@@ -11,145 +11,138 @@ import (
|
|||||||
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/firewall"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewFirewall(t *testing.T) {
|
func TestNewFirewall(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
c := &cert.NebulaCertificate{}
|
c := &cert.NebulaCertificate{}
|
||||||
fw := NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.NotNil(t, fw.Conns)
|
conntrack := fw.Conntrack
|
||||||
|
assert.NotNil(t, conntrack)
|
||||||
|
assert.NotNil(t, conntrack.Conns)
|
||||||
|
assert.NotNil(t, conntrack.TimerWheel)
|
||||||
assert.NotNil(t, fw.InRules)
|
assert.NotNil(t, fw.InRules)
|
||||||
assert.NotNil(t, fw.OutRules)
|
assert.NotNil(t, fw.OutRules)
|
||||||
assert.NotNil(t, fw.TimerWheel)
|
|
||||||
assert.Equal(t, time.Second, fw.TCPTimeout)
|
assert.Equal(t, time.Second, fw.TCPTimeout)
|
||||||
assert.Equal(t, time.Minute, fw.UDPTimeout)
|
assert.Equal(t, time.Minute, fw.UDPTimeout)
|
||||||
assert.Equal(t, time.Hour, fw.DefaultTimeout)
|
assert.Equal(t, time.Hour, fw.DefaultTimeout)
|
||||||
|
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, fw.TimerWheel.wheelLen)
|
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Hour, time.Minute, c)
|
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, fw.TimerWheel.wheelLen)
|
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(time.Hour, time.Second, time.Minute, c)
|
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, fw.TimerWheel.wheelLen)
|
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(time.Hour, time.Minute, time.Second, c)
|
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, fw.TimerWheel.wheelLen)
|
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(time.Minute, time.Hour, time.Second, c)
|
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, fw.TimerWheel.wheelLen)
|
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
||||||
|
|
||||||
fw = NewFirewall(time.Minute, time.Second, time.Hour, c)
|
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
|
||||||
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration)
|
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
|
||||||
assert.Equal(t, 3601, fw.TimerWheel.wheelLen)
|
assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_AddRule(t *testing.T) {
|
func TestFirewall_AddRule(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
out := l.Out
|
|
||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
defer l.SetOutput(out)
|
|
||||||
|
|
||||||
c := &cert.NebulaCertificate{}
|
c := &cert.NebulaCertificate{}
|
||||||
fw := NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.NotNil(t, fw.InRules)
|
assert.NotNil(t, fw.InRules)
|
||||||
assert.NotNil(t, fw.OutRules)
|
assert.NotNil(t, fw.OutRules)
|
||||||
|
|
||||||
_, ti, _ := net.ParseCIDR("1.2.3.4/32")
|
_, ti, _ := net.ParseCIDR("1.2.3.4/32")
|
||||||
|
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoTCP, 1, 1, []string{}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, "", ""))
|
||||||
// An empty rule is any
|
// An empty rule is any
|
||||||
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
||||||
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.left)
|
|
||||||
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.right)
|
|
||||||
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.value)
|
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", ""))
|
||||||
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
||||||
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
||||||
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
||||||
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.left)
|
|
||||||
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.right)
|
|
||||||
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.value)
|
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoICMP, 1, 1, []string{}, "h1", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, "", ""))
|
||||||
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
||||||
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
||||||
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.left)
|
|
||||||
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.right)
|
|
||||||
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.value)
|
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, fwProtoAny, 1, 1, []string{}, "", ti, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, "", ""))
|
||||||
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
||||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(ip2int(ti.IP)))
|
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", ""))
|
||||||
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha"))
|
||||||
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
||||||
|
|
||||||
// Set any and clear fields
|
// Set any and clear fields
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", ""))
|
||||||
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
|
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
|
||||||
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
|
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
|
||||||
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(ip2int(ti.IP)))
|
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||||
|
|
||||||
// run twice just to make sure
|
// run twice just to make sure
|
||||||
//TODO: these ANY rules should clear the CA firewall portion
|
//TODO: these ANY rules should clear the CA firewall portion
|
||||||
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
||||||
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "any", nil, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
|
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
|
||||||
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
|
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
|
||||||
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.left)
|
|
||||||
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.right)
|
|
||||||
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.value)
|
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "any", nil, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||||
|
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
|
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
|
||||||
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "", anyIp, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||||
|
|
||||||
// Test error conditions
|
// Test error conditions
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", ""))
|
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", ""))
|
||||||
assert.Error(t, fw.AddRule(true, fwProtoAny, 10, 0, []string{}, "", nil, "", ""))
|
assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, "", ""))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop(t *testing.T) {
|
func TestFirewall_Drop(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
out := l.Out
|
|
||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
defer l.SetOutput(out)
|
|
||||||
|
|
||||||
p := FirewallPacket{
|
p := firewall.Packet{
|
||||||
ip2int(net.IPv4(1, 2, 3, 4)),
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
ip2int(net.IPv4(1, 2, 3, 4)),
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
10,
|
10,
|
||||||
90,
|
90,
|
||||||
fwProtoUDP,
|
firewall.ProtoUDP,
|
||||||
false,
|
false,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -171,53 +164,53 @@ func TestFirewall_Drop(t *testing.T) {
|
|||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
peerCert: &c,
|
peerCert: &c,
|
||||||
},
|
},
|
||||||
hostId: ip2int(ipNet.IP),
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
}
|
}
|
||||||
h.CreateRemoteCIDR(&c)
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// Drop outbound
|
// Drop outbound
|
||||||
assert.True(t, fw.Drop([]byte{}, p, false, &h, cp))
|
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||||
// Allow inbound
|
// Allow inbound
|
||||||
resetConntrack(fw)
|
resetConntrack(fw)
|
||||||
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
// Allow outbound because conntrack
|
// Allow outbound because conntrack
|
||||||
assert.False(t, fw.Drop([]byte{}, p, false, &h, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
|
||||||
|
|
||||||
// test remote mismatch
|
// test remote mismatch
|
||||||
oldRemote := p.RemoteIP
|
oldRemote := p.RemoteIP
|
||||||
p.RemoteIP = ip2int(net.IPv4(1, 2, 3, 10))
|
p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10))
|
||||||
assert.True(t, fw.Drop([]byte{}, p, false, &h, cp))
|
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP)
|
||||||
p.RemoteIP = oldRemote
|
p.RemoteIP = oldRemote
|
||||||
|
|
||||||
// ensure signer doesn't get in the way of group checks
|
// ensure signer doesn't get in the way of group checks
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum"))
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad"))
|
||||||
assert.True(t, fw.Drop([]byte{}, p, true, &h, cp))
|
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
|
||||||
// test caSha doesn't drop on match
|
// test caSha doesn't drop on match
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad"))
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum"))
|
||||||
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
|
|
||||||
// ensure ca name doesn't get in the way of group checks
|
// ensure ca name doesn't get in the way of group checks
|
||||||
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", ""))
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", ""))
|
||||||
assert.True(t, fw.Drop([]byte{}, p, true, &h, cp))
|
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
|
||||||
// test caName doesn't drop on match
|
// test caName doesn't drop on match
|
||||||
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||||
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", ""))
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", ""))
|
||||||
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkFirewallTable_match(b *testing.B) {
|
func BenchmarkFirewallTable_match(b *testing.B) {
|
||||||
@@ -236,14 +229,14 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
b.Run("fail on proto", func(b *testing.B) {
|
b.Run("fail on proto", func(b *testing.B) {
|
||||||
c := &cert.NebulaCertificate{}
|
c := &cert.NebulaCertificate{}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoUDP}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Run("fail on port", func(b *testing.B) {
|
b.Run("fail on port", func(b *testing.B) {
|
||||||
c := &cert.NebulaCertificate{}
|
c := &cert.NebulaCertificate{}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 1}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -257,7 +250,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -269,7 +262,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -281,12 +274,12 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass on ip", func(b *testing.B) {
|
b.Run("pass on ip", func(b *testing.B) {
|
||||||
ip := ip2int(net.IPv4(172, 1, 1, 1))
|
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
c := &cert.NebulaCertificate{
|
c := &cert.NebulaCertificate{
|
||||||
Details: cert.NebulaCertificateDetails{
|
Details: cert.NebulaCertificateDetails{
|
||||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
@@ -294,14 +287,14 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "")
|
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "")
|
||||||
|
|
||||||
b.Run("pass on ip with any port", func(b *testing.B) {
|
b.Run("pass on ip with any port", func(b *testing.B) {
|
||||||
ip := ip2int(net.IPv4(172, 1, 1, 1))
|
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
c := &cert.NebulaCertificate{
|
c := &cert.NebulaCertificate{
|
||||||
Details: cert.NebulaCertificateDetails{
|
Details: cert.NebulaCertificateDetails{
|
||||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
@@ -309,23 +302,22 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop2(t *testing.T) {
|
func TestFirewall_Drop2(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
out := l.Out
|
|
||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
defer l.SetOutput(out)
|
|
||||||
|
|
||||||
p := FirewallPacket{
|
p := firewall.Packet{
|
||||||
ip2int(net.IPv4(1, 2, 3, 4)),
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
ip2int(net.IPv4(1, 2, 3, 4)),
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
10,
|
10,
|
||||||
90,
|
90,
|
||||||
fwProtoUDP,
|
firewall.ProtoUDP,
|
||||||
false,
|
false,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,7 +337,7 @@ func TestFirewall_Drop2(t *testing.T) {
|
|||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
peerCert: &c,
|
peerCert: &c,
|
||||||
},
|
},
|
||||||
hostId: ip2int(ipNet.IP),
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
}
|
}
|
||||||
h.CreateRemoteCIDR(&c)
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
@@ -363,29 +355,28 @@ func TestFirewall_Drop2(t *testing.T) {
|
|||||||
}
|
}
|
||||||
h1.CreateRemoteCIDR(&c1)
|
h1.CreateRemoteCIDR(&c1)
|
||||||
|
|
||||||
fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// h1/c1 lacks the proper groups
|
// h1/c1 lacks the proper groups
|
||||||
assert.True(t, fw.Drop([]byte{}, p, true, &h1, cp))
|
assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule)
|
||||||
// c has the proper groups
|
// c has the proper groups
|
||||||
resetConntrack(fw)
|
resetConntrack(fw)
|
||||||
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop3(t *testing.T) {
|
func TestFirewall_Drop3(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
out := l.Out
|
|
||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
defer l.SetOutput(out)
|
|
||||||
|
|
||||||
p := FirewallPacket{
|
p := firewall.Packet{
|
||||||
ip2int(net.IPv4(1, 2, 3, 4)),
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
ip2int(net.IPv4(1, 2, 3, 4)),
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
1,
|
1,
|
||||||
1,
|
1,
|
||||||
fwProtoUDP,
|
firewall.ProtoUDP,
|
||||||
false,
|
false,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -412,7 +403,7 @@ func TestFirewall_Drop3(t *testing.T) {
|
|||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
peerCert: &c1,
|
peerCert: &c1,
|
||||||
},
|
},
|
||||||
hostId: ip2int(ipNet.IP),
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
}
|
}
|
||||||
h1.CreateRemoteCIDR(&c1)
|
h1.CreateRemoteCIDR(&c1)
|
||||||
|
|
||||||
@@ -427,7 +418,7 @@ func TestFirewall_Drop3(t *testing.T) {
|
|||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
peerCert: &c2,
|
peerCert: &c2,
|
||||||
},
|
},
|
||||||
hostId: ip2int(ipNet.IP),
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
}
|
}
|
||||||
h2.CreateRemoteCIDR(&c2)
|
h2.CreateRemoteCIDR(&c2)
|
||||||
|
|
||||||
@@ -442,23 +433,90 @@ func TestFirewall_Drop3(t *testing.T) {
|
|||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
peerCert: &c3,
|
peerCert: &c3,
|
||||||
},
|
},
|
||||||
hostId: ip2int(ipNet.IP),
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
}
|
}
|
||||||
h3.CreateRemoteCIDR(&c3)
|
h3.CreateRemoteCIDR(&c3)
|
||||||
|
|
||||||
fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 1, 1, []string{}, "host1", nil, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, "", ""))
|
||||||
assert.Nil(t, fw.AddRule(true, fwProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha"))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// c1 should pass because host match
|
// c1 should pass because host match
|
||||||
assert.False(t, fw.Drop([]byte{}, p, true, &h1, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil))
|
||||||
// c2 should pass because ca sha match
|
// c2 should pass because ca sha match
|
||||||
resetConntrack(fw)
|
resetConntrack(fw)
|
||||||
assert.False(t, fw.Drop([]byte{}, p, true, &h2, cp))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil))
|
||||||
// c3 should fail because no match
|
// c3 should fail because no match
|
||||||
resetConntrack(fw)
|
resetConntrack(fw)
|
||||||
assert.True(t, fw.Drop([]byte{}, p, true, &h3, cp))
|
assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFirewall_DropConntrackReload(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
|
ob := &bytes.Buffer{}
|
||||||
|
l.SetOutput(ob)
|
||||||
|
|
||||||
|
p := firewall.Packet{
|
||||||
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
|
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
|
||||||
|
10,
|
||||||
|
90,
|
||||||
|
firewall.ProtoUDP,
|
||||||
|
false,
|
||||||
|
}
|
||||||
|
|
||||||
|
ipNet := net.IPNet{
|
||||||
|
IP: net.IPv4(1, 2, 3, 4),
|
||||||
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
|
}
|
||||||
|
|
||||||
|
c := cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "host1",
|
||||||
|
Ips: []*net.IPNet{&ipNet},
|
||||||
|
Groups: []string{"default-group"},
|
||||||
|
InvertedGroups: map[string]struct{}{"default-group": {}},
|
||||||
|
Issuer: "signer-shasum",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
h := HostInfo{
|
||||||
|
ConnectionState: &ConnectionState{
|
||||||
|
peerCert: &c,
|
||||||
|
},
|
||||||
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
|
}
|
||||||
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
|
||||||
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
|
// Drop outbound
|
||||||
|
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
// Allow inbound
|
||||||
|
resetConntrack(fw)
|
||||||
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
|
// Allow outbound because conntrack
|
||||||
|
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
|
||||||
|
|
||||||
|
oldFw := fw
|
||||||
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, "", ""))
|
||||||
|
fw.Conntrack = oldFw.Conntrack
|
||||||
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
|
||||||
|
// Allow outbound because conntrack and new rules allow port 10
|
||||||
|
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
|
||||||
|
|
||||||
|
oldFw = fw
|
||||||
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, "", ""))
|
||||||
|
fw.Conntrack = oldFw.Conntrack
|
||||||
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
|
||||||
|
// Drop outbound because conntrack doesn't match new ruleset
|
||||||
|
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkLookup(b *testing.B) {
|
func BenchmarkLookup(b *testing.B) {
|
||||||
@@ -577,124 +635,126 @@ func Test_parsePort(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewFirewallFromConfig(t *testing.T) {
|
func TestNewFirewallFromConfig(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
// Test a bad rule definition
|
// Test a bad rule definition
|
||||||
c := &cert.NebulaCertificate{}
|
c := &cert.NebulaCertificate{}
|
||||||
conf := NewConfig()
|
conf := config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
|
||||||
_, err := NewFirewallFromConfig(c, conf)
|
_, err := NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")
|
assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")
|
||||||
|
|
||||||
// Test both port and code
|
// Test both port and code
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")
|
assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")
|
||||||
|
|
||||||
// Test missing host, group, cidr, ca_name and ca_sha
|
// Test missing host, group, cidr, ca_name and ca_sha
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided")
|
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided")
|
||||||
|
|
||||||
// Test code/port error
|
// Test code/port error
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")
|
assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")
|
||||||
|
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")
|
assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")
|
||||||
|
|
||||||
// Test proto error
|
// Test proto error
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")
|
assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")
|
||||||
|
|
||||||
// Test cidr parse error
|
// Test cidr parse error
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
|
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
|
||||||
|
|
||||||
// Test both group and groups
|
// Test both group and groups
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
|
||||||
_, err = NewFirewallFromConfig(c, conf)
|
_, err = NewFirewallFromConfig(l, c, conf)
|
||||||
assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
|
assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAddFirewallRulesFromConfig(t *testing.T) {
|
func TestAddFirewallRulesFromConfig(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
// Test adding tcp rule
|
// Test adding tcp rule
|
||||||
conf := NewConfig()
|
conf := config.NewC(l)
|
||||||
mf := &mockFirewall{}
|
mf := &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding udp rule
|
// Test adding udp rule
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding icmp rule
|
// Test adding icmp rule
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding any rule
|
// Test adding any rule
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with ca_sha
|
// Test adding rule with ca_sha
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with ca_name
|
// Test adding rule with ca_name
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall)
|
||||||
|
|
||||||
// Test single group
|
// Test single group
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test single groups
|
// Test single groups
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test multiple AND groups
|
// Test multiple AND groups
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
||||||
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall)
|
||||||
|
|
||||||
// Test Add error
|
// Test Add error
|
||||||
conf = NewConfig()
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
mf.nextCallReturn = errors.New("test error")
|
mf.nextCallReturn = errors.New("test error")
|
||||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||||
assert.EqualError(t, AddFirewallRulesFromConfig(true, conf, mf), "firewall.inbound rule #0; `test error`")
|
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTCPRTTTracking(t *testing.T) {
|
func TestTCPRTTTracking(t *testing.T) {
|
||||||
@@ -789,17 +849,16 @@ func TestTCPRTTTracking(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_convertRule(t *testing.T) {
|
func TestFirewall_convertRule(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
out := l.Out
|
|
||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
defer l.SetOutput(out)
|
|
||||||
|
|
||||||
// Ensure group array of 1 is converted and a warning is printed
|
// Ensure group array of 1 is converted and a warning is printed
|
||||||
c := map[interface{}]interface{}{
|
c := map[interface{}]interface{}{
|
||||||
"group": []interface{}{"group1"},
|
"group": []interface{}{"group1"},
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err := convertRule(c, "test", 1)
|
r, err := convertRule(l, c, "test", 1)
|
||||||
assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value")
|
assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value")
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, "group1", r.Group)
|
assert.Equal(t, "group1", r.Group)
|
||||||
@@ -810,7 +869,7 @@ func TestFirewall_convertRule(t *testing.T) {
|
|||||||
"group": []interface{}{"group1", "group2"},
|
"group": []interface{}{"group1", "group2"},
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err = convertRule(c, "test", 1)
|
r, err = convertRule(l, c, "test", 1)
|
||||||
assert.Equal(t, "", ob.String())
|
assert.Equal(t, "", ob.String())
|
||||||
assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
|
assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
|
||||||
|
|
||||||
@@ -820,7 +879,7 @@ func TestFirewall_convertRule(t *testing.T) {
|
|||||||
"group": "group1",
|
"group": "group1",
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err = convertRule(c, "test", 1)
|
r, err = convertRule(l, c, "test", 1)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, "group1", r.Group)
|
assert.Equal(t, "group1", r.Group)
|
||||||
}
|
}
|
||||||
@@ -861,7 +920,7 @@ func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, end
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resetConntrack(fw *Firewall) {
|
func resetConntrack(fw *Firewall) {
|
||||||
fw.connMutex.Lock()
|
fw.Conntrack.Lock()
|
||||||
fw.Conns = map[FirewallPacket]*conn{}
|
fw.Conntrack.Conns = map[firewall.Packet]*conn{}
|
||||||
fw.connMutex.Unlock()
|
fw.Conntrack.Unlock()
|
||||||
}
|
}
|
||||||
|
|||||||
60
go.mod
60
go.mod
@@ -1,33 +1,43 @@
|
|||||||
module github.com/slackhq/nebula
|
module github.com/slackhq/nebula
|
||||||
|
|
||||||
go 1.12
|
go 1.17
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239
|
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
||||||
github.com/armon/go-radix v1.0.0
|
github.com/armon/go-radix v1.0.0
|
||||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
|
||||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
|
github.com/flynn/noise v1.0.0
|
||||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6
|
github.com/gogo/protobuf v1.3.2
|
||||||
github.com/golang/protobuf v1.3.2
|
github.com/golang/protobuf v1.5.2
|
||||||
|
github.com/google/gopacket v1.1.19
|
||||||
github.com/imdario/mergo v0.3.8
|
github.com/imdario/mergo v0.3.8
|
||||||
github.com/kardianos/service v1.0.0
|
github.com/kardianos/service v1.2.0
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
github.com/miekg/dns v1.1.43
|
||||||
github.com/kr/pretty v0.1.0 // indirect
|
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
|
||||||
github.com/miekg/dns v1.1.25
|
github.com/prometheus/client_golang v1.11.0
|
||||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c
|
github.com/prometheus/client_model v0.2.0 // indirect
|
||||||
github.com/prometheus/client_golang v1.2.1
|
github.com/prometheus/procfs v0.7.3 // indirect
|
||||||
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee // indirect
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
||||||
github.com/prometheus/procfs v0.0.8 // indirect
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563
|
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
|
||||||
github.com/sirupsen/logrus v1.4.2
|
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
|
||||||
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b
|
github.com/stretchr/testify v1.7.0
|
||||||
github.com/stretchr/testify v1.4.0
|
github.com/vishvananda/netlink v1.1.0
|
||||||
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a
|
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
|
||||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
|
golang.org/x/net v0.0.0-20211101193420-4a448f8816b3
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
|
golang.org/x/sys v0.0.0-20211102192858-4dd72447c267
|
||||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449
|
google.golang.org/protobuf v1.27.1
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
gopkg.in/yaml.v2 v2.2.7
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
|
github.com/prometheus/common v0.32.1 // indirect
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
505
go.sum
505
go.sum
@@ -1,156 +1,529 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
|
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||||
|
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||||
|
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||||
|
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||||
|
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||||
|
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||||
|
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||||
|
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
|
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||||
|
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||||
|
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||||
|
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||||
|
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||||
|
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||||
|
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||||
|
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||||
|
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
|
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||||
|
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||||
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
||||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
|
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
||||||
|
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||||
|
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
|
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||||
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||||
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||||
|
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
|
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
|
||||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||||
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
|
github.com/kardianos/service v1.2.0 h1:bGuZ/epo3vrt8IPC7mnKQolqFeYJb7Cs8Rk4PSOBB/g=
|
||||||
|
github.com/kardianos/service v1.2.0/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
||||||
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||||
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg=
|
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
|
||||||
github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c h1:G/mfx/MWYuaaGlHkZQBBXFAJiYnRt/GaOVxnRHjlxg4=
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c/go.mod h1:1yMri853KAI2pPAUnESjaqZj9JeImOUM+6A4GuuPmTs=
|
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg=
|
||||||
|
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f/go.mod h1:nwPd6pDNId/Xi16qtKrFHrauSwMNuvk+zcjk89wrnlA=
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=
|
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||||
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
|
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||||
|
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee h1:iBZPTYkGLvdu6+A5TsMUJQkQX9Ad4aCEnSQtdxPuTCQ=
|
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||||
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
|
|
||||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
|
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||||
|
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||||
|
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
|
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||||
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b h1:+y4hCMc/WKsDbAPsOQZgBSaSZ26uh2afyaWeVg/3s/c=
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||||
|
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||||
|
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
|
||||||
|
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
||||||
|
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
|
||||||
|
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a h1:Bt1IVPhiCDMqwGrc2nnbIN4QKvJGx6SK2NzWBmW00ao=
|
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||||
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
|
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||||
|
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||||
|
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
|
||||||
|
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||||
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||||
|
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20211101193420-4a448f8816b3 h1:VrJZAjbekhoRn7n5FBujY31gboH+iB3pdLxn3gE9FjU=
|
||||||
|
golang.org/x/net v0.0.0-20211101193420-4a448f8816b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
|
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211102192858-4dd72447c267 h1:7zYaz3tjChtpayGDzu6H0hDAUM5zIGA2XW7kRNgQ0jc=
|
||||||
|
golang.org/x/sys v0.0.0-20211102192858-4dd72447c267/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||||
|
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||||
|
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
|
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||||
|
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||||
|
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
|
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||||
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
|
|||||||
35
handshake.go
35
handshake.go
@@ -1,36 +1,29 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
const (
|
import (
|
||||||
handshakeIXPSK0 = 0
|
"github.com/slackhq/nebula/header"
|
||||||
handshakeXXPSK0 = 1
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func HandleIncomingHandshake(f *Interface, addr *udpAddr, packet []byte, h *Header, hostinfo *HostInfo) {
|
func HandleIncomingHandshake(f *Interface, addr *udp.Addr, packet []byte, h *header.H, hostinfo *HostInfo) {
|
||||||
newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
|
// First remote allow list check before we know the vpnIp
|
||||||
//TODO: For stage 1 we won't have hostinfo yet but stage 2 and above would require it, this check may be helpful in those cases
|
if !f.lightHouse.remoteAllowList.AllowUnknownVpnIp(addr.IP) {
|
||||||
//if err != nil {
|
f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
|
||||||
// l.WithError(err).WithField("udpAddr", addr).Error("Error while finding host info for handshake message")
|
|
||||||
// return
|
|
||||||
//}
|
|
||||||
|
|
||||||
if !f.lightHouse.remoteAllowList.Allow(udp2ipInt(addr)) {
|
|
||||||
l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tearDown := false
|
|
||||||
switch h.Subtype {
|
switch h.Subtype {
|
||||||
case handshakeIXPSK0:
|
case header.HandshakeIXPSK0:
|
||||||
switch h.MessageCounter {
|
switch h.MessageCounter {
|
||||||
case 1:
|
case 1:
|
||||||
tearDown = ixHandshakeStage1(f, addr, newHostinfo, packet, h)
|
ixHandshakeStage1(f, addr, packet, h)
|
||||||
case 2:
|
case 2:
|
||||||
tearDown = ixHandshakeStage2(f, addr, newHostinfo, packet, h)
|
newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
|
||||||
|
tearDown := ixHandshakeStage2(f, addr, newHostinfo, packet, h)
|
||||||
|
if tearDown && newHostinfo != nil {
|
||||||
|
f.handshakeManager.DeleteHostInfo(newHostinfo)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if tearDown && newHostinfo != nil {
|
|
||||||
f.handshakeManager.DeleteIndex(newHostinfo.localIndexId)
|
|
||||||
f.handshakeManager.DeleteVpnIP(newHostinfo.hostId)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
519
handshake_ix.go
519
handshake_ix.go
@@ -4,40 +4,36 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"bytes"
|
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NOISE IX Handshakes
|
// NOISE IX Handshakes
|
||||||
|
|
||||||
// This function constructs a handshake packet, but does not actually send it
|
// This function constructs a handshake packet, but does not actually send it
|
||||||
// Sending is done by the handshake manager
|
// Sending is done by the handshake manager
|
||||||
func ixHandshakeStage0(f *Interface, vpnIp uint32, hostinfo *HostInfo) {
|
func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
|
||||||
// This queries the lighthouse if we don't know a remote for the host
|
// This queries the lighthouse if we don't know a remote for the host
|
||||||
|
// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
|
||||||
|
// more quickly, effect is a quicker handshake.
|
||||||
if hostinfo.remote == nil {
|
if hostinfo.remote == nil {
|
||||||
ips, err := f.lightHouse.Query(vpnIp, f)
|
f.lightHouse.QueryServer(vpnIp, f)
|
||||||
if err != nil {
|
|
||||||
//l.Debugln(err)
|
|
||||||
}
|
|
||||||
for _, ip := range ips {
|
|
||||||
hostinfo.AddRemote(ip)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
myIndex, err := generateIndex()
|
err := f.handshakeManager.AddIndexHostInfo(hostinfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
|
f.l.WithError(err).WithField("vpnIp", vpnIp).
|
||||||
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
|
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ci := hostinfo.ConnectionState
|
ci := hostinfo.ConnectionState
|
||||||
f.handshakeManager.AddIndexHostInfo(myIndex, hostinfo)
|
|
||||||
|
|
||||||
hsProto := &NebulaHandshakeDetails{
|
hsProto := &NebulaHandshakeDetails{
|
||||||
InitiatorIndex: myIndex,
|
InitiatorIndex: hostinfo.localIndexId,
|
||||||
Time: uint64(time.Now().Unix()),
|
Time: uint64(time.Now().UnixNano()),
|
||||||
Cert: ci.certState.rawCertificateNoKey,
|
Cert: ci.certState.rawCertificateNoKey,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -49,227 +45,329 @@ func ixHandshakeStage0(f *Interface, vpnIp uint32, hostinfo *HostInfo) {
|
|||||||
hsBytes, err = proto.Marshal(hs)
|
hsBytes, err = proto.Marshal(hs)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
|
f.l.WithError(err).WithField("vpnIp", vpnIp).
|
||||||
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
|
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
header := HeaderEncode(make([]byte, HeaderLen), Version, uint8(handshake), handshakeIXPSK0, 0, 1)
|
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
|
||||||
atomic.AddUint64(ci.messageCounter, 1)
|
atomic.AddUint64(&ci.atomicMessageCounter, 1)
|
||||||
|
|
||||||
msg, _, _, err := ci.H.WriteMessage(header, hsBytes)
|
msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
|
f.l.WithError(err).WithField("vpnIp", vpnIp).
|
||||||
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
|
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We are sending handshake packet 1, so we don't expect to receive
|
||||||
|
// handshake packet 1 from the responder
|
||||||
|
ci.window.Update(f.l, 1)
|
||||||
|
|
||||||
hostinfo.HandshakePacket[0] = msg
|
hostinfo.HandshakePacket[0] = msg
|
||||||
hostinfo.HandshakeReady = true
|
hostinfo.HandshakeReady = true
|
||||||
hostinfo.handshakeStart = time.Now()
|
hostinfo.handshakeStart = time.Now()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ixHandshakeStage1(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet []byte, h *Header) bool {
|
func ixHandshakeStage1(f *Interface, addr *udp.Addr, packet []byte, h *header.H) {
|
||||||
var ip uint32
|
var (
|
||||||
if h.RemoteIndex == 0 {
|
err error
|
||||||
ci := f.newConnectionState(false, noise.HandshakeIX, []byte{}, 0)
|
ci *ConnectionState
|
||||||
// Mark packet 1 as seen so it doesn't show up as missed
|
msg []byte
|
||||||
ci.window.Update(1)
|
)
|
||||||
|
|
||||||
msg, _, _, err := ci.H.ReadMessage(nil, packet[HeaderLen:])
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).WithField("udpAddr", addr).
|
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage")
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
hs := &NebulaHandshake{}
|
hs := &NebulaHandshake{}
|
||||||
|
|
||||||
|
// Handle multiple possible psk options, ensure the protobuf comes out clean too
|
||||||
|
for _, p := range f.psk.Cache {
|
||||||
|
//TODO: benchmark generation time of makePsk
|
||||||
|
ci, err = f.newConnectionState(f.l, false, p)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).WithField("udpAddr", addr).Error("Failed to get a new connection state")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, _, _, err = ci.H.ReadMessage(nil, packet[header.Len:])
|
||||||
|
if err != nil {
|
||||||
|
// Calls to ReadMessage with an incorrect psk should fail, try the next one if we have one
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sometimes ReadMessage returns fine with a nil psk even if the handshake is using a psk, ensure our protobuf
|
||||||
|
// comes out clean as well
|
||||||
err = proto.Unmarshal(msg, hs)
|
err = proto.Unmarshal(msg, hs)
|
||||||
/*
|
if err == nil {
|
||||||
l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex)
|
// There was no error, we can continue with this handshake
|
||||||
*/
|
break
|
||||||
if err != nil || hs.Details == nil {
|
|
||||||
l.WithError(err).WithField("udpAddr", addr).
|
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo, _ := f.handshakeManager.pendingHostMap.QueryReverseIndex(hs.Details.InitiatorIndex)
|
// The unmarshal failed, try the next psk if we have one
|
||||||
if hostinfo != nil && bytes.Equal(hostinfo.HandshakePacket[0], packet[HeaderLen:]) {
|
}
|
||||||
if msg, ok := hostinfo.HandshakePacket[2]; ok {
|
|
||||||
err := f.outside.WriteTo(msg, addr)
|
// We finished with an error, log it and get out
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
// We aren't logging the error here because we can't be sure of the failure when using psk
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
|
f.l.WithField("udpAddr", addr).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
WithError(err).Error("Failed to send handshake message")
|
Error("Was unable to decrypt the handshake")
|
||||||
} else {
|
return
|
||||||
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
|
|
||||||
Info("Handshake message sent")
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
// Mark packet 1 as seen so it doesn't show up as missed
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cached", true).
|
ci.window.Update(f.l, 1)
|
||||||
WithField("packets", hostinfo.HandshakePacket).
|
|
||||||
Error("Seen this handshake packet already but don't have a cached packet to return")
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert)
|
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("udpAddr", addr).
|
f.l.WithError(err).WithField("udpAddr", addr).
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
|
||||||
Info("Invalid certificate from host")
|
Info("Invalid certificate from host")
|
||||||
return true
|
return
|
||||||
}
|
}
|
||||||
vpnIP := ip2int(remoteCert.Details.Ips[0].IP)
|
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
|
||||||
certName := remoteCert.Details.Name
|
certName := remoteCert.Details.Name
|
||||||
|
fingerprint, _ := remoteCert.Sha256Sum()
|
||||||
|
issuer := remoteCert.Details.Issuer
|
||||||
|
|
||||||
myIndex, err := generateIndex()
|
if vpnIp == f.myVpnIp {
|
||||||
if err != nil {
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !f.lightHouse.remoteAllowList.Allow(vpnIp, addr.IP) {
|
||||||
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
myIndex, err := generateIndex(f.l)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
|
||||||
return true
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo, err = f.handshakeManager.AddIndex(myIndex, ci)
|
hostinfo := &HostInfo{
|
||||||
if err != nil {
|
ConnectionState: ci,
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
localIndexId: myIndex,
|
||||||
WithField("certName", certName).
|
remoteIndexId: hs.Details.InitiatorIndex,
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Error adding index to connection manager")
|
vpnIp: vpnIp,
|
||||||
|
HandshakePacket: make(map[uint8][]byte, 0),
|
||||||
return true
|
lastHandshakeTime: hs.Details.Time,
|
||||||
}
|
}
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
|
||||||
|
hostinfo.Lock()
|
||||||
|
defer hostinfo.Unlock()
|
||||||
|
|
||||||
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
Info("Handshake message received")
|
Info("Handshake message received")
|
||||||
|
|
||||||
hostinfo.remoteIndexId = hs.Details.InitiatorIndex
|
|
||||||
hs.Details.ResponderIndex = myIndex
|
hs.Details.ResponderIndex = myIndex
|
||||||
hs.Details.Cert = ci.certState.rawCertificateNoKey
|
hs.Details.Cert = ci.certState.rawCertificateNoKey
|
||||||
|
// Update the time in case their clock is way off from ours
|
||||||
|
hs.Details.Time = uint64(time.Now().UnixNano())
|
||||||
|
|
||||||
hsBytes, err := proto.Marshal(hs)
|
hsBytes, err := proto.Marshal(hs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
|
||||||
return true
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
header := HeaderEncode(make([]byte, HeaderLen), Version, uint8(handshake), handshakeIXPSK0, hs.Details.InitiatorIndex, 2)
|
nh := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, hs.Details.InitiatorIndex, 2)
|
||||||
msg, dKey, eKey, err := ci.H.WriteMessage(header, hsBytes)
|
msg, dKey, eKey, err := ci.H.WriteMessage(nh, hsBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
|
||||||
return true
|
return
|
||||||
}
|
} else if dKey == nil || eKey == nil {
|
||||||
|
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
if f.hostMap.CheckHandshakeCompleteIP(vpnIP) && vpnIP < ip2int(f.certState.certificate.Details.Ips[0].IP) {
|
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("fingerprint", fingerprint).
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
WithField("issuer", issuer).
|
||||||
Info("Prevented a handshake race")
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Noise did not arrive at a key")
|
||||||
|
return
|
||||||
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
|
||||||
f.SendMessageToVpnIp(test, testRequest, vpnIP, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.HandshakePacket[0] = make([]byte, len(packet[HeaderLen:]))
|
hostinfo.HandshakePacket[0] = make([]byte, len(packet[header.Len:]))
|
||||||
copy(hostinfo.HandshakePacket[0], packet[HeaderLen:])
|
copy(hostinfo.HandshakePacket[0], packet[header.Len:])
|
||||||
|
|
||||||
// Regardless of whether you are the sender or receiver, you should arrive here
|
// Regardless of whether you are the sender or receiver, you should arrive here
|
||||||
// and complete standing up the connection.
|
// and complete standing up the connection.
|
||||||
if dKey != nil && eKey != nil {
|
|
||||||
hostinfo.HandshakePacket[2] = make([]byte, len(msg))
|
hostinfo.HandshakePacket[2] = make([]byte, len(msg))
|
||||||
copy(hostinfo.HandshakePacket[2], msg)
|
copy(hostinfo.HandshakePacket[2], msg)
|
||||||
|
|
||||||
|
// We are sending handshake packet 2, so we don't expect to receive
|
||||||
|
// handshake packet 2 from the initiator.
|
||||||
|
ci.window.Update(f.l, 2)
|
||||||
|
|
||||||
|
ci.peerCert = remoteCert
|
||||||
|
ci.dKey = NewNebulaCipherState(dKey)
|
||||||
|
ci.eKey = NewNebulaCipherState(eKey)
|
||||||
|
|
||||||
|
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
|
||||||
|
hostinfo.SetRemote(addr)
|
||||||
|
hostinfo.CreateRemoteCIDR(remoteCert)
|
||||||
|
|
||||||
|
// Only overwrite existing record if we should win the handshake race
|
||||||
|
overwrite := vpnIp > f.myVpnIp
|
||||||
|
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f)
|
||||||
|
if err != nil {
|
||||||
|
switch err {
|
||||||
|
case ErrAlreadySeen:
|
||||||
|
// Update remote if preferred (Note we have to switch to locking
|
||||||
|
// the existing hostinfo, and then switch back so the defer Unlock
|
||||||
|
// higher in this function still works)
|
||||||
|
hostinfo.Unlock()
|
||||||
|
existing.Lock()
|
||||||
|
// Update remote if preferred
|
||||||
|
if existing.SetRemoteIfPreferred(f.hostMap, addr) {
|
||||||
|
// Send a test packet to ensure the other side has also switched to
|
||||||
|
// the preferred remote
|
||||||
|
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
||||||
|
}
|
||||||
|
existing.Unlock()
|
||||||
|
hostinfo.Lock()
|
||||||
|
|
||||||
|
msg = existing.HandshakePacket[2]
|
||||||
|
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
|
||||||
err := f.outside.WriteTo(msg, addr)
|
err := f.outside.WriteTo(msg, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
|
||||||
|
WithError(err).Error("Failed to send handshake message")
|
||||||
|
} else {
|
||||||
|
f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
|
||||||
|
Info("Handshake message sent")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
case ErrExistingHostInfo:
|
||||||
|
// This means there was an existing tunnel and this handshake was older than the one we are currently based on
|
||||||
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("oldHandshakeTime", existing.lastHandshakeTime).
|
||||||
|
WithField("newHandshakeTime", hostinfo.lastHandshakeTime).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
|
Info("Handshake too old")
|
||||||
|
|
||||||
|
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
||||||
|
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
||||||
|
return
|
||||||
|
case ErrLocalIndexCollision:
|
||||||
|
// This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry
|
||||||
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
|
WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
|
||||||
|
Error("Failed to add HostInfo due to localIndex collision")
|
||||||
|
return
|
||||||
|
case ErrExistingHandshake:
|
||||||
|
// We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish
|
||||||
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
|
Error("Prevented a pending handshake race")
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
|
||||||
|
// And we forget to update it here
|
||||||
|
f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
|
Error("Failed to add HostInfo to HostMap")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do the send
|
||||||
|
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
|
||||||
|
err = f.outside.WriteTo(msg, addr)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
WithError(err).Error("Failed to send handshake")
|
WithError(err).Error("Failed to send handshake")
|
||||||
} else {
|
} else {
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
|
WithField("sentCachedPackets", len(hostinfo.packetStore)).
|
||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
}
|
}
|
||||||
|
|
||||||
ip = ip2int(remoteCert.Details.Ips[0].IP)
|
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
|
||||||
ci.peerCert = remoteCert
|
|
||||||
ci.dKey = NewNebulaCipherState(dKey)
|
|
||||||
ci.eKey = NewNebulaCipherState(eKey)
|
|
||||||
//l.Debugln("got symmetric pairs")
|
|
||||||
|
|
||||||
//hostinfo.ClearRemotes()
|
return
|
||||||
hostinfo.AddRemote(*addr)
|
|
||||||
hostinfo.CreateRemoteCIDR(remoteCert)
|
|
||||||
f.lightHouse.AddRemoteAndReset(ip, addr)
|
|
||||||
if f.serveDns {
|
|
||||||
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
ho, err := f.hostMap.QueryVpnIP(vpnIP)
|
|
||||||
if err == nil && ho.localIndexId != 0 {
|
|
||||||
l.WithField("vpnIp", vpnIP).
|
|
||||||
WithField("certName", certName).
|
|
||||||
WithField("action", "removing stale index").
|
|
||||||
WithField("index", ho.localIndexId).
|
|
||||||
Debug("Handshake processing")
|
|
||||||
f.hostMap.DeleteIndex(ho.localIndexId)
|
|
||||||
}
|
|
||||||
|
|
||||||
f.hostMap.AddIndexHostInfo(hostinfo.localIndexId, hostinfo)
|
|
||||||
f.hostMap.AddVpnIPHostInfo(vpnIP, hostinfo)
|
|
||||||
|
|
||||||
hostinfo.handshakeComplete()
|
|
||||||
} else {
|
|
||||||
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
|
||||||
WithField("certName", certName).
|
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
|
||||||
Error("Noise did not arrive at a key")
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
f.hostMap.AddRemote(ip, addr)
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ixHandshakeStage2(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet []byte, h *Header) bool {
|
func ixHandshakeStage2(f *Interface, addr *udp.Addr, hostinfo *HostInfo, packet []byte, h *header.H) bool {
|
||||||
if hostinfo == nil {
|
if hostinfo == nil {
|
||||||
|
// Nothing here to tear down, got a bogus stage 2 packet
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if bytes.Equal(hostinfo.HandshakePacket[2], packet[HeaderLen:]) {
|
hostinfo.Lock()
|
||||||
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
defer hostinfo.Unlock()
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
|
|
||||||
Error("Already seen this handshake packet")
|
if !f.lightHouse.remoteAllowList.Allow(hostinfo.vpnIp, addr.IP) {
|
||||||
|
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
ci := hostinfo.ConnectionState
|
ci := hostinfo.ConnectionState
|
||||||
// Mark packet 2 as seen so it doesn't show up as missed
|
if ci.ready {
|
||||||
ci.window.Update(2)
|
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
|
||||||
|
Info("Handshake is already complete")
|
||||||
|
|
||||||
hostinfo.HandshakePacket[2] = make([]byte, len(packet[HeaderLen:]))
|
// Update remote if preferred
|
||||||
copy(hostinfo.HandshakePacket[2], packet[HeaderLen:])
|
if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
|
||||||
|
// Send a test packet to ensure the other side has also switched to
|
||||||
|
// the preferred remote
|
||||||
|
f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
||||||
|
}
|
||||||
|
|
||||||
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[HeaderLen:])
|
// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
|
||||||
Error("Failed to call noise.ReadMessage")
|
Error("Failed to call noise.ReadMessage")
|
||||||
|
|
||||||
@@ -277,85 +375,114 @@ func ixHandshakeStage2(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet [
|
|||||||
// to DOS us. Every other error condition after should to allow a possible good handshake to complete in the
|
// to DOS us. Every other error condition after should to allow a possible good handshake to complete in the
|
||||||
// near future
|
// near future
|
||||||
return false
|
return false
|
||||||
|
} else if dKey == nil || eKey == nil {
|
||||||
|
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
|
Error("Noise did not arrive at a key")
|
||||||
|
|
||||||
|
// This should be impossible in IX but just in case, if we get here then there is no chance to recover
|
||||||
|
// the handshake state machine. Tear it down
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
hs := &NebulaHandshake{}
|
hs := &NebulaHandshake{}
|
||||||
err = proto.Unmarshal(msg, hs)
|
err = proto.Unmarshal(msg, hs)
|
||||||
if err != nil || hs.Details == nil {
|
if err != nil || hs.Details == nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
|
||||||
|
|
||||||
|
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert)
|
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
Error("Invalid certificate from host")
|
Error("Invalid certificate from host")
|
||||||
|
|
||||||
|
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
vpnIP := ip2int(remoteCert.Details.Ips[0].IP)
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
|
||||||
certName := remoteCert.Details.Name
|
certName := remoteCert.Details.Name
|
||||||
|
fingerprint, _ := remoteCert.Sha256Sum()
|
||||||
|
issuer := remoteCert.Details.Issuer
|
||||||
|
|
||||||
|
// Ensure the right host responded
|
||||||
|
if vpnIp != hostinfo.vpnIp {
|
||||||
|
f.l.WithField("intendedVpnIp", hostinfo.vpnIp).WithField("haveVpnIp", vpnIp).
|
||||||
|
WithField("udpAddr", addr).WithField("certName", certName).
|
||||||
|
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
|
Info("Incorrect host responded to handshake")
|
||||||
|
|
||||||
|
// Release our old handshake from pending, it should not continue
|
||||||
|
f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||||
|
|
||||||
|
// Create a new hostinfo/handshake for the intended vpn ip
|
||||||
|
//TODO: this adds it to the timer wheel in a way that aggressively retries
|
||||||
|
newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
|
||||||
|
newHostInfo.Lock()
|
||||||
|
|
||||||
|
// Block the current used address
|
||||||
|
newHostInfo.remotes = hostinfo.remotes
|
||||||
|
newHostInfo.remotes.BlockRemote(addr)
|
||||||
|
|
||||||
|
// Get the correct remote list for the host we did handshake with
|
||||||
|
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
|
||||||
|
|
||||||
|
f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
|
||||||
|
WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
|
||||||
|
Info("Blocked addresses for handshakes")
|
||||||
|
|
||||||
|
// Swap the packet store to benefit the original intended recipient
|
||||||
|
hostinfo.ConnectionState.queueLock.Lock()
|
||||||
|
newHostInfo.packetStore = hostinfo.packetStore
|
||||||
|
hostinfo.packetStore = []*cachedPacket{}
|
||||||
|
hostinfo.ConnectionState.queueLock.Unlock()
|
||||||
|
|
||||||
|
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
|
||||||
|
hostinfo.vpnIp = vpnIp
|
||||||
|
f.sendCloseTunnel(hostinfo)
|
||||||
|
newHostInfo.Unlock()
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark packet 2 as seen so it doesn't show up as missed
|
||||||
|
ci.window.Update(f.l, 2)
|
||||||
|
|
||||||
duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
|
duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
|
||||||
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
|
WithField("fingerprint", fingerprint).
|
||||||
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
WithField("durationNs", duration).
|
WithField("durationNs", duration).
|
||||||
|
WithField("sentCachedPackets", len(hostinfo.packetStore)).
|
||||||
Info("Handshake message received")
|
Info("Handshake message received")
|
||||||
|
|
||||||
//ci.remoteIndex = hs.ResponderIndex
|
|
||||||
hostinfo.remoteIndexId = hs.Details.ResponderIndex
|
hostinfo.remoteIndexId = hs.Details.ResponderIndex
|
||||||
hs.Details.Cert = ci.certState.rawCertificateNoKey
|
hostinfo.lastHandshakeTime = hs.Details.Time
|
||||||
|
|
||||||
/*
|
// Store their cert and our symmetric keys
|
||||||
hsBytes, err := proto.Marshal(hs)
|
|
||||||
if err != nil {
|
|
||||||
l.Debugln("Failed to marshal handshake: ", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Regardless of whether you are the sender or receiver, you should arrive here
|
|
||||||
// and complete standing up the connection.
|
|
||||||
if dKey != nil && eKey != nil {
|
|
||||||
ip := ip2int(remoteCert.Details.Ips[0].IP)
|
|
||||||
ci.peerCert = remoteCert
|
ci.peerCert = remoteCert
|
||||||
ci.dKey = NewNebulaCipherState(dKey)
|
ci.dKey = NewNebulaCipherState(dKey)
|
||||||
ci.eKey = NewNebulaCipherState(eKey)
|
ci.eKey = NewNebulaCipherState(eKey)
|
||||||
//l.Debugln("got symmetric pairs")
|
|
||||||
|
|
||||||
//hostinfo.ClearRemotes()
|
// Make sure the current udpAddr being used is set for responding
|
||||||
f.hostMap.AddRemote(ip, addr)
|
hostinfo.SetRemote(addr)
|
||||||
|
|
||||||
|
// Build up the radix for the firewall if we have subnets in the cert
|
||||||
hostinfo.CreateRemoteCIDR(remoteCert)
|
hostinfo.CreateRemoteCIDR(remoteCert)
|
||||||
f.lightHouse.AddRemoteAndReset(ip, addr)
|
|
||||||
if f.serveDns {
|
|
||||||
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
ho, err := f.hostMap.QueryVpnIP(vpnIP)
|
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
|
||||||
if err == nil && ho.localIndexId != 0 {
|
//TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok?
|
||||||
l.WithField("vpnIp", vpnIP).
|
f.handshakeManager.Complete(hostinfo, f)
|
||||||
WithField("certName", certName).
|
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
|
||||||
WithField("action", "removing stale index").
|
|
||||||
WithField("index", ho.localIndexId).
|
|
||||||
Debug("Handshake processing")
|
|
||||||
f.hostMap.DeleteIndex(ho.localIndexId)
|
|
||||||
}
|
|
||||||
|
|
||||||
f.hostMap.AddVpnIPHostInfo(vpnIP, hostinfo)
|
|
||||||
f.hostMap.AddIndexHostInfo(hostinfo.localIndexId, hostinfo)
|
|
||||||
|
|
||||||
hostinfo.handshakeComplete()
|
|
||||||
f.metricHandshakes.Update(duration)
|
f.metricHandshakes.Update(duration)
|
||||||
} else {
|
|
||||||
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
|
|
||||||
WithField("certName", certName).
|
|
||||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
|
||||||
Error("Noise did not arrive at a key")
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,192 +1,360 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Total time to try a handshake = sequence of HandshakeTryInterval * HandshakeRetries
|
|
||||||
// With 100ms interval and 20 retries is 23.5 seconds
|
|
||||||
DefaultHandshakeTryInterval = time.Millisecond * 100
|
DefaultHandshakeTryInterval = time.Millisecond * 100
|
||||||
DefaultHandshakeRetries = 20
|
DefaultHandshakeRetries = 10
|
||||||
// DefaultHandshakeWaitRotation is the number of handshake attempts to do before starting to use other ips addresses
|
DefaultHandshakeTriggerBuffer = 64
|
||||||
DefaultHandshakeWaitRotation = 5
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
defaultHandshakeConfig = HandshakeConfig{
|
defaultHandshakeConfig = HandshakeConfig{
|
||||||
tryInterval: DefaultHandshakeTryInterval,
|
tryInterval: DefaultHandshakeTryInterval,
|
||||||
retries: DefaultHandshakeRetries,
|
retries: DefaultHandshakeRetries,
|
||||||
waitRotation: DefaultHandshakeWaitRotation,
|
triggerBuffer: DefaultHandshakeTriggerBuffer,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
type HandshakeConfig struct {
|
type HandshakeConfig struct {
|
||||||
tryInterval time.Duration
|
tryInterval time.Duration
|
||||||
retries int
|
retries int
|
||||||
waitRotation int
|
triggerBuffer int
|
||||||
|
|
||||||
|
messageMetrics *MessageMetrics
|
||||||
}
|
}
|
||||||
|
|
||||||
type HandshakeManager struct {
|
type HandshakeManager struct {
|
||||||
pendingHostMap *HostMap
|
pendingHostMap *HostMap
|
||||||
mainHostMap *HostMap
|
mainHostMap *HostMap
|
||||||
lightHouse *LightHouse
|
lightHouse *LightHouse
|
||||||
outside *udpConn
|
outside *udp.Conn
|
||||||
config HandshakeConfig
|
config HandshakeConfig
|
||||||
|
|
||||||
OutboundHandshakeTimer *SystemTimerWheel
|
OutboundHandshakeTimer *SystemTimerWheel
|
||||||
InboundHandshakeTimer *SystemTimerWheel
|
messageMetrics *MessageMetrics
|
||||||
|
metricInitiated metrics.Counter
|
||||||
|
metricTimedOut metrics.Counter
|
||||||
|
l *logrus.Logger
|
||||||
|
|
||||||
|
// can be used to trigger outbound handshake for the given vpnIp
|
||||||
|
trigger chan iputil.VpnIp
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewHandshakeManager(tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udpConn, config HandshakeConfig) *HandshakeManager {
|
func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
|
||||||
return &HandshakeManager{
|
return &HandshakeManager{
|
||||||
pendingHostMap: NewHostMap("pending", tunCidr, preferredRanges),
|
pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges),
|
||||||
mainHostMap: mainHostMap,
|
mainHostMap: mainHostMap,
|
||||||
lightHouse: lightHouse,
|
lightHouse: lightHouse,
|
||||||
outside: outside,
|
outside: outside,
|
||||||
|
|
||||||
config: config,
|
config: config,
|
||||||
|
trigger: make(chan iputil.VpnIp, config.triggerBuffer),
|
||||||
OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, config.tryInterval*time.Duration(config.retries)),
|
OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
|
||||||
InboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, config.tryInterval*time.Duration(config.retries)),
|
messageMetrics: config.messageMetrics,
|
||||||
|
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
|
||||||
|
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
|
||||||
|
l: l,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) Run(f EncWriter) {
|
func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
|
||||||
clockSource := time.Tick(c.config.tryInterval)
|
clockSource := time.NewTicker(c.config.tryInterval)
|
||||||
for now := range clockSource {
|
defer clockSource.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case vpnIP := <-c.trigger:
|
||||||
|
c.l.WithField("vpnIp", vpnIP).Debug("HandshakeManager: triggered")
|
||||||
|
c.handleOutbound(vpnIP, f, true)
|
||||||
|
case now := <-clockSource.C:
|
||||||
c.NextOutboundHandshakeTimerTick(now, f)
|
c.NextOutboundHandshakeTimerTick(now, f)
|
||||||
c.NextInboundHandshakeTimerTick(now)
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
|
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) {
|
||||||
c.OutboundHandshakeTimer.advance(now)
|
c.OutboundHandshakeTimer.advance(now)
|
||||||
for {
|
for {
|
||||||
ep := c.OutboundHandshakeTimer.Purge()
|
ep := c.OutboundHandshakeTimer.Purge()
|
||||||
if ep == nil {
|
if ep == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
vpnIP := ep.(uint32)
|
vpnIp := ep.(iputil.VpnIp)
|
||||||
|
c.handleOutbound(vpnIp, f, false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
index, err := c.pendingHostMap.GetIndexByVpnIP(vpnIP)
|
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) {
|
||||||
|
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
return
|
||||||
|
}
|
||||||
|
hostinfo.Lock()
|
||||||
|
defer hostinfo.Unlock()
|
||||||
|
|
||||||
|
// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
|
||||||
|
if hostinfo.HandshakeComplete {
|
||||||
|
// Ensure we don't exist in the pending hostmap anymore since we have completed
|
||||||
|
c.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo, err := c.pendingHostMap.QueryVpnIP(vpnIP)
|
// Check if we have a handshake packet to transmit yet
|
||||||
if err != nil {
|
if !hostinfo.HandshakeReady {
|
||||||
continue
|
// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
|
||||||
|
// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
|
||||||
|
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we haven't finished the handshake and we haven't hit max retries, query
|
// If we are out of time, clean up
|
||||||
// lighthouse and then send the handshake packet again.
|
if hostinfo.HandshakeCounter >= c.config.retries {
|
||||||
if hostinfo.HandshakeCounter < c.config.retries && !hostinfo.HandshakeComplete {
|
hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
|
||||||
if hostinfo.remote == nil {
|
|
||||||
// We continue to query the lighthouse because hosts may
|
|
||||||
// come online during handshake retries. If the query
|
|
||||||
// succeeds (no error), add the lighthouse info to hostinfo
|
|
||||||
ips, err := c.lightHouse.Query(vpnIP, f)
|
|
||||||
if err == nil {
|
|
||||||
for _, ip := range ips {
|
|
||||||
hostinfo.AddRemote(ip)
|
|
||||||
}
|
|
||||||
hostinfo.ForcePromoteBest(c.mainHostMap.preferredRanges)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
hostinfo.HandshakeCounter++
|
|
||||||
|
|
||||||
// We want to use the "best" calculated ip for the first 5 attempts, after that we just blindly rotate through
|
|
||||||
// all the others until we can stand up a connection.
|
|
||||||
if hostinfo.HandshakeCounter > c.config.waitRotation {
|
|
||||||
hostinfo.rotateRemote()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the handshake is ready to avoid a race in timer tick and stage 0 handshake generation
|
|
||||||
if hostinfo.HandshakeReady && hostinfo.remote != nil {
|
|
||||||
err := c.outside.WriteTo(hostinfo.HandshakePacket[0], hostinfo.remote)
|
|
||||||
if err != nil {
|
|
||||||
hostinfo.logger().WithField("udpAddr", hostinfo.remote).
|
|
||||||
WithField("initiatorIndex", hostinfo.localIndexId).
|
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||||
WithField("remoteIndex", hostinfo.remoteIndexId).
|
WithField("remoteIndex", hostinfo.remoteIndexId).
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
WithError(err).Error("Failed to send handshake message")
|
WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
|
||||||
} else {
|
Info("Handshake timed out")
|
||||||
//TODO: this log line is assuming a lot of stuff around the cached stage 0 handshake packet, we should
|
c.metricTimedOut.Inc(1)
|
||||||
// keep the real packet struct around for logging purposes
|
c.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||||
hostinfo.logger().WithField("udpAddr", hostinfo.remote).
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// We only care about a lighthouse trigger before the first handshake transmit attempt. This is a very specific
|
||||||
|
// optimization for a fast lighthouse reply
|
||||||
|
//TODO: it would feel better to do this once, anytime, as our delay increases over time
|
||||||
|
if lighthouseTriggered && hostinfo.HandshakeCounter > 0 {
|
||||||
|
// If we didn't return here a lighthouse could cause us to aggressively send handshakes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a remotes object if we don't already have one.
|
||||||
|
// This is mainly to protect us as this should never be the case
|
||||||
|
if hostinfo.remotes == nil {
|
||||||
|
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: this will generate a load of queries for hosts with only 1 ip (i'm not using a lighthouse, static mapped)
|
||||||
|
if hostinfo.remotes.Len(c.pendingHostMap.preferredRanges) <= 1 {
|
||||||
|
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
|
||||||
|
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
|
||||||
|
// the learned public ip for them. Query again to short circuit the promotion counter
|
||||||
|
c.lightHouse.QueryServer(vpnIp, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
|
||||||
|
var sentTo []*udp.Addr
|
||||||
|
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
|
||||||
|
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
|
||||||
|
err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
|
||||||
|
if err != nil {
|
||||||
|
hostinfo.logger(c.l).WithField("udpAddr", addr).
|
||||||
|
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||||
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
|
WithError(err).Error("Failed to send handshake message")
|
||||||
|
|
||||||
|
} else {
|
||||||
|
sentTo = append(sentTo, addr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout
|
||||||
|
if len(sentTo) > 0 {
|
||||||
|
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
|
||||||
WithField("initiatorIndex", hostinfo.localIndexId).
|
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||||
WithField("remoteIndex", hostinfo.remoteIndexId).
|
|
||||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Readd to the timer wheel so we continue trying wait HandshakeTryInterval * counter longer for next try
|
// Increment the counter to increase our delay, linear backoff
|
||||||
//l.Infoln("Interval: ", HandshakeTryInterval*time.Duration(hostinfo.HandshakeCounter))
|
hostinfo.HandshakeCounter++
|
||||||
c.OutboundHandshakeTimer.Add(vpnIP, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
|
|
||||||
} else {
|
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
|
||||||
c.pendingHostMap.DeleteVpnIP(vpnIP)
|
if !lighthouseTriggered {
|
||||||
c.pendingHostMap.DeleteIndex(index)
|
//TODO: feel like we dupe handshake real fast in a tight loop, why?
|
||||||
}
|
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) NextInboundHandshakeTimerTick(now time.Time) {
|
func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp) *HostInfo {
|
||||||
c.InboundHandshakeTimer.advance(now)
|
hostinfo := c.pendingHostMap.AddVpnIp(vpnIp)
|
||||||
for {
|
|
||||||
ep := c.InboundHandshakeTimer.Purge()
|
|
||||||
if ep == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
index := ep.(uint32)
|
|
||||||
|
|
||||||
vpnIP, err := c.pendingHostMap.GetVpnIPByIndex(index)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
c.pendingHostMap.DeleteIndex(index)
|
|
||||||
c.pendingHostMap.DeleteVpnIP(vpnIP)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *HandshakeManager) AddVpnIP(vpnIP uint32) *HostInfo {
|
|
||||||
hostinfo := c.pendingHostMap.AddVpnIP(vpnIP)
|
|
||||||
// We lock here and use an array to insert items to prevent locking the
|
// We lock here and use an array to insert items to prevent locking the
|
||||||
// main receive thread for very long by waiting to add items to the pending map
|
// main receive thread for very long by waiting to add items to the pending map
|
||||||
c.OutboundHandshakeTimer.Add(vpnIP, c.config.tryInterval)
|
//TODO: what lock?
|
||||||
|
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
|
||||||
|
c.metricInitiated.Inc(1)
|
||||||
|
|
||||||
return hostinfo
|
return hostinfo
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) DeleteVpnIP(vpnIP uint32) {
|
var (
|
||||||
//l.Debugln("Deleting pending vpn ip :", IntIp(vpnIP))
|
ErrExistingHostInfo = errors.New("existing hostinfo")
|
||||||
c.pendingHostMap.DeleteVpnIP(vpnIP)
|
ErrAlreadySeen = errors.New("already seen")
|
||||||
}
|
ErrLocalIndexCollision = errors.New("local index collision")
|
||||||
|
ErrExistingHandshake = errors.New("existing handshake")
|
||||||
|
)
|
||||||
|
|
||||||
func (c *HandshakeManager) AddIndex(index uint32, ci *ConnectionState) (*HostInfo, error) {
|
// CheckAndComplete checks for any conflicts in the main and pending hostmap
|
||||||
hostinfo, err := c.pendingHostMap.AddIndex(index, ci)
|
// before adding hostinfo to main. If err is nil, it was added. Otherwise err will be:
|
||||||
if err != nil {
|
//
|
||||||
return nil, fmt.Errorf("Issue adding index: %d", index)
|
// ErrAlreadySeen if we already have an entry in the hostmap that has seen the
|
||||||
|
// exact same handshake packet
|
||||||
|
//
|
||||||
|
// ErrExistingHostInfo if we already have an entry in the hostmap for this
|
||||||
|
// VpnIp and the new handshake was older than the one we currently have
|
||||||
|
//
|
||||||
|
// ErrLocalIndexCollision if we already have an entry in the main or pending
|
||||||
|
// hostmap for the hostinfo.localIndexId.
|
||||||
|
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) {
|
||||||
|
c.pendingHostMap.Lock()
|
||||||
|
defer c.pendingHostMap.Unlock()
|
||||||
|
c.mainHostMap.Lock()
|
||||||
|
defer c.mainHostMap.Unlock()
|
||||||
|
|
||||||
|
// Check if we already have a tunnel with this vpn ip
|
||||||
|
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
|
||||||
|
if found && existingHostInfo != nil {
|
||||||
|
// Is it just a delayed handshake packet?
|
||||||
|
if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
|
||||||
|
return existingHostInfo, ErrAlreadySeen
|
||||||
}
|
}
|
||||||
//c.mainHostMap.AddIndexHostInfo(index, hostinfo)
|
|
||||||
c.InboundHandshakeTimer.Add(index, time.Second*10)
|
// Is this a newer handshake?
|
||||||
return hostinfo, nil
|
if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime {
|
||||||
|
return existingHostInfo, ErrExistingHostInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
existingHostInfo.logger(c.l).Info("Taking new handshake")
|
||||||
|
}
|
||||||
|
|
||||||
|
existingIndex, found := c.mainHostMap.Indexes[hostinfo.localIndexId]
|
||||||
|
if found {
|
||||||
|
// We have a collision, but for a different hostinfo
|
||||||
|
return existingIndex, ErrLocalIndexCollision
|
||||||
|
}
|
||||||
|
|
||||||
|
existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
|
||||||
|
if found && existingIndex != hostinfo {
|
||||||
|
// We have a collision, but for a different hostinfo
|
||||||
|
return existingIndex, ErrLocalIndexCollision
|
||||||
|
}
|
||||||
|
|
||||||
|
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
|
||||||
|
if found && existingRemoteIndex != nil && existingRemoteIndex.vpnIp != hostinfo.vpnIp {
|
||||||
|
// We have a collision, but this can happen since we can't control
|
||||||
|
// the remote ID. Just log about the situation as a note.
|
||||||
|
hostinfo.logger(c.l).
|
||||||
|
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
|
||||||
|
Info("New host shadows existing host remoteIndex")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we are also handshaking with this vpn ip
|
||||||
|
pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
|
||||||
|
if found && pendingHostInfo != nil {
|
||||||
|
if !overwrite {
|
||||||
|
// We won, let our pending handshake win
|
||||||
|
return pendingHostInfo, ErrExistingHandshake
|
||||||
|
}
|
||||||
|
|
||||||
|
// We lost, take this handshake and move any cached packets over so they get sent
|
||||||
|
pendingHostInfo.ConnectionState.queueLock.Lock()
|
||||||
|
hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
|
||||||
|
c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
|
||||||
|
pendingHostInfo.ConnectionState.queueLock.Unlock()
|
||||||
|
pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
|
||||||
|
}
|
||||||
|
|
||||||
|
if existingHostInfo != nil {
|
||||||
|
// We are going to overwrite this entry, so remove the old references
|
||||||
|
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
|
||||||
|
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
|
||||||
|
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.mainHostMap.addHostInfo(hostinfo, f)
|
||||||
|
return existingHostInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) AddIndexHostInfo(index uint32, h *HostInfo) {
|
// Complete is a simpler version of CheckAndComplete when we already know we
|
||||||
c.pendingHostMap.AddIndexHostInfo(index, h)
|
// won't have a localIndexId collision because we already have an entry in the
|
||||||
|
// pendingHostMap
|
||||||
|
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
|
||||||
|
c.pendingHostMap.Lock()
|
||||||
|
defer c.pendingHostMap.Unlock()
|
||||||
|
c.mainHostMap.Lock()
|
||||||
|
defer c.mainHostMap.Unlock()
|
||||||
|
|
||||||
|
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
|
||||||
|
if found && existingHostInfo != nil {
|
||||||
|
// We are going to overwrite this entry, so remove the old references
|
||||||
|
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
|
||||||
|
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
|
||||||
|
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
|
||||||
|
}
|
||||||
|
|
||||||
|
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
|
||||||
|
if found && existingRemoteIndex != nil {
|
||||||
|
// We have a collision, but this can happen since we can't control
|
||||||
|
// the remote ID. Just log about the situation as a note.
|
||||||
|
hostinfo.logger(c.l).
|
||||||
|
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
|
||||||
|
Info("New host shadows existing host remoteIndex")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.mainHostMap.addHostInfo(hostinfo, f)
|
||||||
|
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) DeleteIndex(index uint32) {
|
// AddIndexHostInfo generates a unique localIndexId for this HostInfo
|
||||||
//l.Debugln("Deleting pending index :", index)
|
// and adds it to the pendingHostMap. Will error if we are unable to generate
|
||||||
c.pendingHostMap.DeleteIndex(index)
|
// a unique localIndexId
|
||||||
|
func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
|
||||||
|
c.pendingHostMap.Lock()
|
||||||
|
defer c.pendingHostMap.Unlock()
|
||||||
|
c.mainHostMap.RLock()
|
||||||
|
defer c.mainHostMap.RUnlock()
|
||||||
|
|
||||||
|
for i := 0; i < 32; i++ {
|
||||||
|
index, err := generateIndex(c.l)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, inPending := c.pendingHostMap.Indexes[index]
|
||||||
|
_, inMain := c.mainHostMap.Indexes[index]
|
||||||
|
|
||||||
|
if !inMain && !inPending {
|
||||||
|
h.localIndexId = index
|
||||||
|
c.pendingHostMap.Indexes[index] = h
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("failed to generate unique localIndexId")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
|
||||||
|
c.pendingHostMap.addRemoteIndexHostInfo(index, h)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
|
||||||
|
//l.Debugln("Deleting pending hostinfo :", hostinfo)
|
||||||
|
c.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
|
func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
|
||||||
@@ -200,18 +368,28 @@ func (c *HandshakeManager) EmitStats() {
|
|||||||
|
|
||||||
// Utility functions below
|
// Utility functions below
|
||||||
|
|
||||||
func generateIndex() (uint32, error) {
|
func generateIndex(l *logrus.Logger) (uint32, error) {
|
||||||
b := make([]byte, 4)
|
b := make([]byte, 4)
|
||||||
|
|
||||||
|
// Let zero mean we don't know the ID, so don't generate zero
|
||||||
|
var index uint32
|
||||||
|
for index == 0 {
|
||||||
_, err := rand.Read(b)
|
_, err := rand.Read(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Errorln(err)
|
l.Errorln(err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
index := binary.BigEndian.Uint32(b)
|
index = binary.BigEndian.Uint32(b)
|
||||||
|
}
|
||||||
|
|
||||||
if l.Level >= logrus.DebugLevel {
|
if l.Level >= logrus.DebugLevel {
|
||||||
l.WithField("index", index).
|
l.WithField("index", index).
|
||||||
Debug("Generated index")
|
Debug("Generated index")
|
||||||
}
|
}
|
||||||
return index, nil
|
return index, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hsTimeout(tries int, interval time.Duration) time.Duration {
|
||||||
|
return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,190 +5,108 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
var indexes []uint32 = []uint32{1000, 2000, 3000, 4000}
|
func Test_NewHandshakeManagerVpnIp(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
//var ips []uint32 = []uint32{9000, 9999999, 3, 292394923}
|
|
||||||
var ips []uint32
|
|
||||||
|
|
||||||
func Test_NewHandshakeManagerIndex(t *testing.T) {
|
|
||||||
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||||
ips = []uint32{ip2int(net.ParseIP("172.1.1.2"))}
|
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
|
||||||
mainHM := NewHostMap("test", vpncidr, preferredRanges)
|
|
||||||
|
|
||||||
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
blah.NextInboundHandshakeTimerTick(now)
|
|
||||||
|
|
||||||
// Add four indexes
|
|
||||||
for _, v := range indexes {
|
|
||||||
blah.AddIndex(v, &ConnectionState{})
|
|
||||||
}
|
|
||||||
// Confirm they are in the pending index list
|
|
||||||
for _, v := range indexes {
|
|
||||||
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(v))
|
|
||||||
}
|
|
||||||
// Adding something to pending should not affect the main hostmap
|
|
||||||
assert.Len(t, mainHM.Indexes, 0)
|
|
||||||
// Jump ahead 8 seconds
|
|
||||||
for i := 1; i <= DefaultHandshakeRetries; i++ {
|
|
||||||
next_tick := now.Add(DefaultHandshakeTryInterval * time.Duration(i))
|
|
||||||
blah.NextInboundHandshakeTimerTick(next_tick)
|
|
||||||
}
|
|
||||||
// Confirm they are still in the pending index list
|
|
||||||
for _, v := range indexes {
|
|
||||||
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(v))
|
|
||||||
}
|
|
||||||
// Jump ahead 4 more seconds
|
|
||||||
next_tick := now.Add(12 * time.Second)
|
|
||||||
blah.NextInboundHandshakeTimerTick(next_tick)
|
|
||||||
// Confirm they have been removed
|
|
||||||
for _, v := range indexes {
|
|
||||||
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(v))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_NewHandshakeManagerVpnIP(t *testing.T) {
|
|
||||||
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
|
||||||
ips = []uint32{ip2int(net.ParseIP("172.1.1.2"))}
|
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
mw := &mockEncWriter{}
|
mw := &mockEncWriter{}
|
||||||
mainHM := NewHostMap("test", vpncidr, preferredRanges)
|
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||||
|
|
||||||
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
|
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, &LightHouse{}, &udp.Conn{}, defaultHandshakeConfig)
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
blah.NextOutboundHandshakeTimerTick(now, mw)
|
blah.NextOutboundHandshakeTimerTick(now, mw)
|
||||||
|
|
||||||
// Add four "IPs" - which are just uint32s
|
i := blah.AddVpnIp(ip)
|
||||||
for _, v := range ips {
|
i.remotes = NewRemoteList()
|
||||||
blah.AddVpnIP(v)
|
i.HandshakeReady = true
|
||||||
}
|
|
||||||
// Adding something to pending should not affect the main hostmap
|
// Adding something to pending should not affect the main hostmap
|
||||||
assert.Len(t, mainHM.Hosts, 0)
|
assert.Len(t, mainHM.Hosts, 0)
|
||||||
// Confirm they are in the pending index list
|
|
||||||
for _, v := range ips {
|
|
||||||
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Jump ahead `HandshakeRetries` ticks
|
// Confirm they are in the pending index list
|
||||||
cumulative := time.Duration(0)
|
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
|
||||||
for i := 0; i <= DefaultHandshakeRetries+1; i++ {
|
|
||||||
cumulative += time.Duration(i)*DefaultHandshakeTryInterval + 1
|
// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
|
||||||
next_tick := now.Add(cumulative)
|
for i := 1; i <= DefaultHandshakeRetries+1; i++ {
|
||||||
//l.Infoln(next_tick)
|
now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
|
||||||
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
|
blah.NextOutboundHandshakeTimerTick(now, mw)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Confirm they are still in the pending index list
|
// Confirm they are still in the pending index list
|
||||||
for _, v := range ips {
|
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
|
||||||
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(v))
|
|
||||||
}
|
// Tick 1 more time, a minute will certainly flush it out
|
||||||
// Jump ahead 1 more second
|
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
|
||||||
cumulative += time.Duration(DefaultHandshakeRetries+1) * DefaultHandshakeTryInterval
|
|
||||||
next_tick := now.Add(cumulative)
|
|
||||||
//l.Infoln(next_tick)
|
|
||||||
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
|
|
||||||
// Confirm they have been removed
|
// Confirm they have been removed
|
||||||
for _, v := range ips {
|
assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
|
||||||
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(v))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_NewHandshakeManagerVpnIPcleanup(t *testing.T) {
|
func Test_NewHandshakeManagerTrigger(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||||
vpnIP = ip2int(net.ParseIP("172.1.1.2"))
|
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
preferredRanges := []*net.IPNet{localrange}
|
||||||
mw := &mockEncWriter{}
|
mw := &mockEncWriter{}
|
||||||
mainHM := NewHostMap("test", vpncidr, preferredRanges)
|
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||||
|
lh := &LightHouse{addrMap: make(map[iputil.VpnIp]*RemoteList), l: l}
|
||||||
|
|
||||||
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
|
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
blah.NextOutboundHandshakeTimerTick(now, mw)
|
blah.NextOutboundHandshakeTimerTick(now, mw)
|
||||||
|
|
||||||
hostinfo := blah.AddVpnIP(vpnIP)
|
assert.Equal(t, 0, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
||||||
// Pretned we have an index too
|
|
||||||
blah.AddIndexHostInfo(12341234, hostinfo)
|
|
||||||
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(12341234))
|
|
||||||
|
|
||||||
// Jump ahead `HandshakeRetries` ticks. Eviction should happen in pending
|
hi := blah.AddVpnIp(ip)
|
||||||
// but not main hostmap
|
hi.HandshakeReady = true
|
||||||
cumulative := time.Duration(0)
|
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
||||||
for i := 1; i <= DefaultHandshakeRetries+2; i++ {
|
assert.Equal(t, 0, hi.HandshakeCounter, "Should not have attempted a handshake yet")
|
||||||
cumulative += DefaultHandshakeTryInterval * time.Duration(i)
|
|
||||||
next_tick := now.Add(cumulative)
|
|
||||||
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
for i := 0; i <= HandshakeRetries+1; i++ {
|
|
||||||
next_tick := now.Add(cumulative)
|
|
||||||
//l.Infoln(next_tick)
|
|
||||||
blah.NextOutboundHandshakeTimerTick(next_tick)
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
/*
|
|
||||||
for i := 0; i <= HandshakeRetries+1; i++ {
|
|
||||||
next_tick := now.Add(time.Duration(i) * time.Second)
|
|
||||||
blah.NextOutboundHandshakeTimerTick(next_tick)
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
// Trigger the same method the channel will but, this should set our remotes pointer
|
||||||
cumulative += HandshakeTryInterval*time.Duration(HandshakeRetries) + 3
|
blah.handleOutbound(ip, mw, true)
|
||||||
next_tick := now.Add(cumulative)
|
assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have done a handshake attempt")
|
||||||
l.Infoln(cumulative, next_tick)
|
assert.NotNil(t, hi.remotes, "Manager should have set my remotes pointer")
|
||||||
blah.NextOutboundHandshakeTimerTick(next_tick)
|
|
||||||
*/
|
// Make sure the trigger doesn't double schedule the timer entry
|
||||||
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(vpnIP))
|
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
||||||
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(12341234))
|
|
||||||
|
uaddr := udp.NewAddrFromString("10.1.1.1:4242")
|
||||||
|
hi.remotes.unlockedPrependV4(ip, NewIp4AndPort(uaddr.IP, uint32(uaddr.Port)))
|
||||||
|
|
||||||
|
// We now have remotes but only the first trigger should have pushed things forward
|
||||||
|
blah.handleOutbound(ip, mw, true)
|
||||||
|
assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have not done a handshake attempt")
|
||||||
|
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_NewHandshakeManagerIndexcleanup(t *testing.T) {
|
func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) {
|
||||||
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
for _, i := range tw.wheel {
|
||||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
n := i.Head
|
||||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
for n != nil {
|
||||||
preferredRanges := []*net.IPNet{localrange}
|
c++
|
||||||
mainHM := NewHostMap("test", vpncidr, preferredRanges)
|
n = n.Next
|
||||||
|
|
||||||
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
blah.NextInboundHandshakeTimerTick(now)
|
|
||||||
|
|
||||||
hostinfo, _ := blah.AddIndex(12341234, &ConnectionState{})
|
|
||||||
// Pretned we have an index too
|
|
||||||
blah.pendingHostMap.AddVpnIPHostInfo(101010, hostinfo)
|
|
||||||
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(101010))
|
|
||||||
|
|
||||||
for i := 1; i <= DefaultHandshakeRetries+2; i++ {
|
|
||||||
next_tick := now.Add(DefaultHandshakeTryInterval * time.Duration(i))
|
|
||||||
blah.NextInboundHandshakeTimerTick(next_tick)
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
next_tick := now.Add(DefaultHandshakeTryInterval*DefaultHandshakeRetries + 3)
|
return c
|
||||||
blah.NextInboundHandshakeTimerTick(next_tick)
|
|
||||||
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(101010))
|
|
||||||
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(12341234))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type mockEncWriter struct {
|
type mockEncWriter struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mw *mockEncWriter) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) {
|
func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mw *mockEncWriter) SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
package nebula
|
package header
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
@@ -19,82 +19,78 @@ import (
|
|||||||
// |-----------------------------------------------------------------------|
|
// |-----------------------------------------------------------------------|
|
||||||
// | payload... |
|
// | payload... |
|
||||||
|
|
||||||
|
type m map[string]interface{}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Version uint8 = 1
|
Version uint8 = 1
|
||||||
HeaderLen = 16
|
Len = 16
|
||||||
)
|
)
|
||||||
|
|
||||||
type NebulaMessageType uint8
|
type MessageType uint8
|
||||||
type NebulaMessageSubType uint8
|
type MessageSubType uint8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
handshake NebulaMessageType = 0
|
Handshake MessageType = 0
|
||||||
message NebulaMessageType = 1
|
Message MessageType = 1
|
||||||
recvError NebulaMessageType = 2
|
RecvError MessageType = 2
|
||||||
lightHouse NebulaMessageType = 3
|
LightHouse MessageType = 3
|
||||||
test NebulaMessageType = 4
|
Test MessageType = 4
|
||||||
closeTunnel NebulaMessageType = 5
|
CloseTunnel MessageType = 5
|
||||||
|
|
||||||
//TODO These are deprecated as of 06/12/2018 - NB
|
|
||||||
testRemote NebulaMessageType = 6
|
|
||||||
testRemoteReply NebulaMessageType = 7
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var typeMap = map[NebulaMessageType]string{
|
var typeMap = map[MessageType]string{
|
||||||
handshake: "handshake",
|
Handshake: "handshake",
|
||||||
message: "message",
|
Message: "message",
|
||||||
recvError: "recvError",
|
RecvError: "recvError",
|
||||||
lightHouse: "lightHouse",
|
LightHouse: "lightHouse",
|
||||||
test: "test",
|
Test: "test",
|
||||||
closeTunnel: "closeTunnel",
|
CloseTunnel: "closeTunnel",
|
||||||
|
|
||||||
//TODO These are deprecated as of 06/12/2018 - NB
|
|
||||||
testRemote: "testRemote",
|
|
||||||
testRemoteReply: "testRemoteReply",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
testRequest NebulaMessageSubType = 0
|
TestRequest MessageSubType = 0
|
||||||
testReply NebulaMessageSubType = 1
|
TestReply MessageSubType = 1
|
||||||
)
|
)
|
||||||
|
|
||||||
var eHeaderTooShort = errors.New("header is too short")
|
const (
|
||||||
|
HandshakeIXPSK0 MessageSubType = 0
|
||||||
|
HandshakeXXPSK0 MessageSubType = 1
|
||||||
|
)
|
||||||
|
|
||||||
var subTypeTestMap = map[NebulaMessageSubType]string{
|
var ErrHeaderTooShort = errors.New("header is too short")
|
||||||
testRequest: "testRequest",
|
|
||||||
testReply: "testReply",
|
var subTypeTestMap = map[MessageSubType]string{
|
||||||
|
TestRequest: "testRequest",
|
||||||
|
TestReply: "testReply",
|
||||||
}
|
}
|
||||||
|
|
||||||
var subTypeNoneMap = map[NebulaMessageSubType]string{0: "none"}
|
var subTypeNoneMap = map[MessageSubType]string{0: "none"}
|
||||||
|
|
||||||
var subTypeMap = map[NebulaMessageType]*map[NebulaMessageSubType]string{
|
var subTypeMap = map[MessageType]*map[MessageSubType]string{
|
||||||
message: &subTypeNoneMap,
|
Message: &subTypeNoneMap,
|
||||||
recvError: &subTypeNoneMap,
|
RecvError: &subTypeNoneMap,
|
||||||
lightHouse: &subTypeNoneMap,
|
LightHouse: &subTypeNoneMap,
|
||||||
test: &subTypeTestMap,
|
Test: &subTypeTestMap,
|
||||||
closeTunnel: &subTypeNoneMap,
|
CloseTunnel: &subTypeNoneMap,
|
||||||
handshake: {
|
Handshake: {
|
||||||
handshakeIXPSK0: "ix_psk0",
|
HandshakeIXPSK0: "ix_psk0",
|
||||||
},
|
},
|
||||||
//TODO: these are deprecated
|
|
||||||
testRemote: &subTypeNoneMap,
|
|
||||||
testRemoteReply: &subTypeNoneMap,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Header struct {
|
type H struct {
|
||||||
Version uint8
|
Version uint8
|
||||||
Type NebulaMessageType
|
Type MessageType
|
||||||
Subtype NebulaMessageSubType
|
Subtype MessageSubType
|
||||||
Reserved uint16
|
Reserved uint16
|
||||||
RemoteIndex uint32
|
RemoteIndex uint32
|
||||||
MessageCounter uint64
|
MessageCounter uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeaderEncode uses the provided byte array to encode the provided header values into.
|
// Encode uses the provided byte array to encode the provided header values into.
|
||||||
// Byte array must be capped higher than HeaderLen or this will panic
|
// Byte array must be capped higher than HeaderLen or this will panic
|
||||||
func HeaderEncode(b []byte, v uint8, t uint8, st uint8, ri uint32, c uint64) []byte {
|
func Encode(b []byte, v uint8, t MessageType, st MessageSubType, ri uint32, c uint64) []byte {
|
||||||
b = b[:HeaderLen]
|
b = b[:Len]
|
||||||
b[0] = byte(v<<4 | (t & 0x0f))
|
b[0] = v<<4 | byte(t&0x0f)
|
||||||
b[1] = byte(st)
|
b[1] = byte(st)
|
||||||
binary.BigEndian.PutUint16(b[2:4], 0)
|
binary.BigEndian.PutUint16(b[2:4], 0)
|
||||||
binary.BigEndian.PutUint32(b[4:8], ri)
|
binary.BigEndian.PutUint32(b[4:8], ri)
|
||||||
@@ -103,7 +99,7 @@ func HeaderEncode(b []byte, v uint8, t uint8, st uint8, ri uint32, c uint64) []b
|
|||||||
}
|
}
|
||||||
|
|
||||||
// String creates a readable string representation of a header
|
// String creates a readable string representation of a header
|
||||||
func (h *Header) String() string {
|
func (h *H) String() string {
|
||||||
if h == nil {
|
if h == nil {
|
||||||
return "<nil>"
|
return "<nil>"
|
||||||
}
|
}
|
||||||
@@ -112,7 +108,7 @@ func (h *Header) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON creates a json string representation of a header
|
// MarshalJSON creates a json string representation of a header
|
||||||
func (h *Header) MarshalJSON() ([]byte, error) {
|
func (h *H) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(m{
|
return json.Marshal(m{
|
||||||
"version": h.Version,
|
"version": h.Version,
|
||||||
"type": h.TypeName(),
|
"type": h.TypeName(),
|
||||||
@@ -124,24 +120,24 @@ func (h *Header) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encode turns header into bytes
|
// Encode turns header into bytes
|
||||||
func (h *Header) Encode(b []byte) ([]byte, error) {
|
func (h *H) Encode(b []byte) ([]byte, error) {
|
||||||
if h == nil {
|
if h == nil {
|
||||||
return nil, errors.New("nil header")
|
return nil, errors.New("nil header")
|
||||||
}
|
}
|
||||||
|
|
||||||
return HeaderEncode(b, h.Version, uint8(h.Type), uint8(h.Subtype), h.RemoteIndex, h.MessageCounter), nil
|
return Encode(b, h.Version, h.Type, h.Subtype, h.RemoteIndex, h.MessageCounter), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse is a helper function to parses given bytes into new Header struct
|
// Parse is a helper function to parses given bytes into new Header struct
|
||||||
func (h *Header) Parse(b []byte) error {
|
func (h *H) Parse(b []byte) error {
|
||||||
if len(b) < HeaderLen {
|
if len(b) < Len {
|
||||||
return eHeaderTooShort
|
return ErrHeaderTooShort
|
||||||
}
|
}
|
||||||
// get upper 4 bytes
|
// get upper 4 bytes
|
||||||
h.Version = uint8((b[0] >> 4) & 0x0f)
|
h.Version = uint8((b[0] >> 4) & 0x0f)
|
||||||
// get lower 4 bytes
|
// get lower 4 bytes
|
||||||
h.Type = NebulaMessageType(b[0] & 0x0f)
|
h.Type = MessageType(b[0] & 0x0f)
|
||||||
h.Subtype = NebulaMessageSubType(b[1])
|
h.Subtype = MessageSubType(b[1])
|
||||||
h.Reserved = binary.BigEndian.Uint16(b[2:4])
|
h.Reserved = binary.BigEndian.Uint16(b[2:4])
|
||||||
h.RemoteIndex = binary.BigEndian.Uint32(b[4:8])
|
h.RemoteIndex = binary.BigEndian.Uint32(b[4:8])
|
||||||
h.MessageCounter = binary.BigEndian.Uint64(b[8:16])
|
h.MessageCounter = binary.BigEndian.Uint64(b[8:16])
|
||||||
@@ -149,12 +145,12 @@ func (h *Header) Parse(b []byte) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TypeName will transform the headers message type into a human string
|
// TypeName will transform the headers message type into a human string
|
||||||
func (h *Header) TypeName() string {
|
func (h *H) TypeName() string {
|
||||||
return TypeName(h.Type)
|
return TypeName(h.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TypeName will transform a nebula message type into a human string
|
// TypeName will transform a nebula message type into a human string
|
||||||
func TypeName(t NebulaMessageType) string {
|
func TypeName(t MessageType) string {
|
||||||
if n, ok := typeMap[t]; ok {
|
if n, ok := typeMap[t]; ok {
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
@@ -163,12 +159,12 @@ func TypeName(t NebulaMessageType) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SubTypeName will transform the headers message sub type into a human string
|
// SubTypeName will transform the headers message sub type into a human string
|
||||||
func (h *Header) SubTypeName() string {
|
func (h *H) SubTypeName() string {
|
||||||
return SubTypeName(h.Type, h.Subtype)
|
return SubTypeName(h.Type, h.Subtype)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubTypeName will transform a nebula message sub type into a human string
|
// SubTypeName will transform a nebula message sub type into a human string
|
||||||
func SubTypeName(t NebulaMessageType, s NebulaMessageSubType) string {
|
func SubTypeName(t MessageType, s MessageSubType) string {
|
||||||
if n, ok := subTypeMap[t]; ok {
|
if n, ok := subTypeMap[t]; ok {
|
||||||
if x, ok := (*n)[s]; ok {
|
if x, ok := (*n)[s]; ok {
|
||||||
return x
|
return x
|
||||||
@@ -179,8 +175,8 @@ func SubTypeName(t NebulaMessageType, s NebulaMessageSubType) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewHeader turns bytes into a header
|
// NewHeader turns bytes into a header
|
||||||
func NewHeader(b []byte) (*Header, error) {
|
func NewHeader(b []byte) (*H, error) {
|
||||||
h := new(Header)
|
h := new(H)
|
||||||
if err := h.Parse(b); err != nil {
|
if err := h.Parse(b); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
115
header/header_test.go
Normal file
115
header/header_test.go
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
package header
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type headerTest struct {
|
||||||
|
expectedBytes []byte
|
||||||
|
*H
|
||||||
|
}
|
||||||
|
|
||||||
|
// 0001 0010 00010010
|
||||||
|
var headerBigEndianTests = []headerTest{{
|
||||||
|
expectedBytes: []byte{0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9},
|
||||||
|
// 1010 0000
|
||||||
|
H: &H{
|
||||||
|
// 1111 1+2+4+8 = 15
|
||||||
|
Version: 5,
|
||||||
|
Type: 4,
|
||||||
|
Subtype: 0,
|
||||||
|
Reserved: 0,
|
||||||
|
RemoteIndex: 10,
|
||||||
|
MessageCounter: 9,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEncode(t *testing.T) {
|
||||||
|
for _, tt := range headerBigEndianTests {
|
||||||
|
b, err := tt.Encode(make([]byte, Len))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, tt.expectedBytes, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParse(t *testing.T) {
|
||||||
|
for _, tt := range headerBigEndianTests {
|
||||||
|
b := tt.expectedBytes
|
||||||
|
parsedHeader := &H{}
|
||||||
|
parsedHeader.Parse(b)
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(tt.H, parsedHeader) {
|
||||||
|
t.Fatalf("got %#v; want %#v", parsedHeader, tt.H)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTypeName(t *testing.T) {
|
||||||
|
assert.Equal(t, "test", TypeName(Test))
|
||||||
|
assert.Equal(t, "test", (&H{Type: Test}).TypeName())
|
||||||
|
|
||||||
|
assert.Equal(t, "unknown", TypeName(99))
|
||||||
|
assert.Equal(t, "unknown", (&H{Type: 99}).TypeName())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSubTypeName(t *testing.T) {
|
||||||
|
assert.Equal(t, "testRequest", SubTypeName(Test, TestRequest))
|
||||||
|
assert.Equal(t, "testRequest", (&H{Type: Test, Subtype: TestRequest}).SubTypeName())
|
||||||
|
|
||||||
|
assert.Equal(t, "unknown", SubTypeName(99, TestRequest))
|
||||||
|
assert.Equal(t, "unknown", (&H{Type: 99, Subtype: TestRequest}).SubTypeName())
|
||||||
|
|
||||||
|
assert.Equal(t, "unknown", SubTypeName(Test, 99))
|
||||||
|
assert.Equal(t, "unknown", (&H{Type: Test, Subtype: 99}).SubTypeName())
|
||||||
|
|
||||||
|
assert.Equal(t, "none", SubTypeName(Message, 0))
|
||||||
|
assert.Equal(t, "none", (&H{Type: Message, Subtype: 0}).SubTypeName())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTypeMap(t *testing.T) {
|
||||||
|
// Force people to document this stuff
|
||||||
|
assert.Equal(t, map[MessageType]string{
|
||||||
|
Handshake: "handshake",
|
||||||
|
Message: "message",
|
||||||
|
RecvError: "recvError",
|
||||||
|
LightHouse: "lightHouse",
|
||||||
|
Test: "test",
|
||||||
|
CloseTunnel: "closeTunnel",
|
||||||
|
}, typeMap)
|
||||||
|
|
||||||
|
assert.Equal(t, map[MessageType]*map[MessageSubType]string{
|
||||||
|
Message: &subTypeNoneMap,
|
||||||
|
RecvError: &subTypeNoneMap,
|
||||||
|
LightHouse: &subTypeNoneMap,
|
||||||
|
Test: &subTypeTestMap,
|
||||||
|
CloseTunnel: &subTypeNoneMap,
|
||||||
|
Handshake: {
|
||||||
|
HandshakeIXPSK0: "ix_psk0",
|
||||||
|
},
|
||||||
|
}, subTypeMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHeader_String(t *testing.T) {
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
"ver=100 type=test subtype=testRequest reserved=0x63 remoteindex=98 messagecounter=97",
|
||||||
|
(&H{100, Test, TestRequest, 99, 98, 97}).String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHeader_MarshalJSON(t *testing.T) {
|
||||||
|
b, err := (&H{100, Test, TestRequest, 99, 98, 97}).MarshalJSON()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
"{\"messageCounter\":97,\"remoteIndex\":98,\"reserved\":99,\"subType\":\"testRequest\",\"type\":\"test\",\"version\":100}",
|
||||||
|
string(b),
|
||||||
|
)
|
||||||
|
}
|
||||||
118
header_test.go
118
header_test.go
@@ -1,118 +0,0 @@
|
|||||||
package nebula
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
type headerTest struct {
|
|
||||||
expectedBytes []byte
|
|
||||||
*Header
|
|
||||||
}
|
|
||||||
|
|
||||||
// 0001 0010 00010010
|
|
||||||
var headerBigEndianTests = []headerTest{{
|
|
||||||
expectedBytes: []byte{0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9},
|
|
||||||
// 1010 0000
|
|
||||||
Header: &Header{
|
|
||||||
// 1111 1+2+4+8 = 15
|
|
||||||
Version: 5,
|
|
||||||
Type: 4,
|
|
||||||
Subtype: 0,
|
|
||||||
Reserved: 0,
|
|
||||||
RemoteIndex: 10,
|
|
||||||
MessageCounter: 9,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEncode(t *testing.T) {
|
|
||||||
for _, tt := range headerBigEndianTests {
|
|
||||||
b, err := tt.Encode(make([]byte, HeaderLen))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, tt.expectedBytes, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParse(t *testing.T) {
|
|
||||||
for _, tt := range headerBigEndianTests {
|
|
||||||
b := tt.expectedBytes
|
|
||||||
parsedHeader := &Header{}
|
|
||||||
parsedHeader.Parse(b)
|
|
||||||
|
|
||||||
if !reflect.DeepEqual(tt.Header, parsedHeader) {
|
|
||||||
t.Fatalf("got %#v; want %#v", parsedHeader, tt.Header)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTypeName(t *testing.T) {
|
|
||||||
assert.Equal(t, "test", TypeName(test))
|
|
||||||
assert.Equal(t, "test", (&Header{Type: test}).TypeName())
|
|
||||||
|
|
||||||
assert.Equal(t, "unknown", TypeName(99))
|
|
||||||
assert.Equal(t, "unknown", (&Header{Type: 99}).TypeName())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSubTypeName(t *testing.T) {
|
|
||||||
assert.Equal(t, "testRequest", SubTypeName(test, testRequest))
|
|
||||||
assert.Equal(t, "testRequest", (&Header{Type: test, Subtype: testRequest}).SubTypeName())
|
|
||||||
|
|
||||||
assert.Equal(t, "unknown", SubTypeName(99, testRequest))
|
|
||||||
assert.Equal(t, "unknown", (&Header{Type: 99, Subtype: testRequest}).SubTypeName())
|
|
||||||
|
|
||||||
assert.Equal(t, "unknown", SubTypeName(test, 99))
|
|
||||||
assert.Equal(t, "unknown", (&Header{Type: test, Subtype: 99}).SubTypeName())
|
|
||||||
|
|
||||||
assert.Equal(t, "none", SubTypeName(message, 0))
|
|
||||||
assert.Equal(t, "none", (&Header{Type: message, Subtype: 0}).SubTypeName())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTypeMap(t *testing.T) {
|
|
||||||
// Force people to document this stuff
|
|
||||||
assert.Equal(t, map[NebulaMessageType]string{
|
|
||||||
handshake: "handshake",
|
|
||||||
message: "message",
|
|
||||||
recvError: "recvError",
|
|
||||||
lightHouse: "lightHouse",
|
|
||||||
test: "test",
|
|
||||||
closeTunnel: "closeTunnel",
|
|
||||||
testRemote: "testRemote",
|
|
||||||
testRemoteReply: "testRemoteReply",
|
|
||||||
}, typeMap)
|
|
||||||
|
|
||||||
assert.Equal(t, map[NebulaMessageType]*map[NebulaMessageSubType]string{
|
|
||||||
message: &subTypeNoneMap,
|
|
||||||
recvError: &subTypeNoneMap,
|
|
||||||
lightHouse: &subTypeNoneMap,
|
|
||||||
test: &subTypeTestMap,
|
|
||||||
closeTunnel: &subTypeNoneMap,
|
|
||||||
handshake: {
|
|
||||||
handshakeIXPSK0: "ix_psk0",
|
|
||||||
},
|
|
||||||
testRemote: &subTypeNoneMap,
|
|
||||||
testRemoteReply: &subTypeNoneMap,
|
|
||||||
}, subTypeMap)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHeader_String(t *testing.T) {
|
|
||||||
assert.Equal(
|
|
||||||
t,
|
|
||||||
"ver=100 type=test subtype=testRequest reserved=0x63 remoteindex=98 messagecounter=97",
|
|
||||||
(&Header{100, test, testRequest, 99, 98, 97}).String(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHeader_MarshalJSON(t *testing.T) {
|
|
||||||
b, err := (&Header{100, test, testRequest, 99, 98, 97}).MarshalJSON()
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(
|
|
||||||
t,
|
|
||||||
"{\"messageCounter\":97,\"remoteIndex\":98,\"reserved\":99,\"subType\":\"testRequest\",\"type\":\"test\",\"version\":100}",
|
|
||||||
string(b),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
784
hostmap.go
784
hostmap.go
File diff suppressed because it is too large
Load Diff
165
hostmap_test.go
165
hostmap_test.go
@@ -1,166 +1 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
func TestHostInfoDestProbe(t *testing.T) {
|
|
||||||
a, _ := net.ResolveUDPAddr("udp", "1.0.0.1:22222")
|
|
||||||
d := NewHostInfoDest(a)
|
|
||||||
|
|
||||||
// 999 probes that all return should give a 100% success rate
|
|
||||||
for i := 0; i < 999; i++ {
|
|
||||||
meh := d.Probe()
|
|
||||||
d.ProbeReceived(meh)
|
|
||||||
}
|
|
||||||
assert.Equal(t, d.Grade(), float64(1))
|
|
||||||
|
|
||||||
// 999 probes of which only half return should give a 50% success rate
|
|
||||||
for i := 0; i < 999; i++ {
|
|
||||||
meh := d.Probe()
|
|
||||||
if i%2 == 0 {
|
|
||||||
d.ProbeReceived(meh)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, d.Grade(), float64(.5))
|
|
||||||
|
|
||||||
// 999 probes of which none return should give a 0% success rate
|
|
||||||
for i := 0; i < 999; i++ {
|
|
||||||
d.Probe()
|
|
||||||
}
|
|
||||||
assert.Equal(t, d.Grade(), float64(0))
|
|
||||||
|
|
||||||
// 999 probes of which only 1/4 return should give a 25% success rate
|
|
||||||
for i := 0; i < 999; i++ {
|
|
||||||
meh := d.Probe()
|
|
||||||
if i%4 == 0 {
|
|
||||||
d.ProbeReceived(meh)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, d.Grade(), float64(.25))
|
|
||||||
|
|
||||||
// 999 probes of which only half return and are duplicates should give a 50% success rate
|
|
||||||
for i := 0; i < 999; i++ {
|
|
||||||
meh := d.Probe()
|
|
||||||
if i%2 == 0 {
|
|
||||||
d.ProbeReceived(meh)
|
|
||||||
d.ProbeReceived(meh)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, d.Grade(), float64(.5))
|
|
||||||
|
|
||||||
// 999 probes of which only way old replies return should give a 0% success rate
|
|
||||||
for i := 0; i < 999; i++ {
|
|
||||||
meh := d.Probe()
|
|
||||||
d.ProbeReceived(meh - 101)
|
|
||||||
}
|
|
||||||
assert.Equal(t, d.Grade(), float64(0))
|
|
||||||
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
func TestHostmap(t *testing.T) {
|
|
||||||
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
|
|
||||||
_, localToMe, _ := net.ParseCIDR("192.168.1.0/24")
|
|
||||||
myNets := []*net.IPNet{myNet}
|
|
||||||
preferredRanges := []*net.IPNet{localToMe}
|
|
||||||
|
|
||||||
m := NewHostMap("test", myNet, preferredRanges)
|
|
||||||
|
|
||||||
a := NewUDPAddrFromString("10.127.0.3:11111")
|
|
||||||
b := NewUDPAddrFromString("1.0.0.1:22222")
|
|
||||||
y := NewUDPAddrFromString("10.128.0.3:11111")
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), a)
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), b)
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
|
|
||||||
|
|
||||||
info, _ := m.QueryVpnIP(ip2int(net.ParseIP("10.128.1.1")))
|
|
||||||
|
|
||||||
// There should be three remotes in the host map
|
|
||||||
assert.Equal(t, 3, len(info.Remotes))
|
|
||||||
|
|
||||||
// Adding an identical remote should not change the count
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
|
|
||||||
assert.Equal(t, 3, len(info.Remotes))
|
|
||||||
|
|
||||||
// Adding a fresh remote should add one
|
|
||||||
y = NewUDPAddrFromString("10.18.0.3:11111")
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
|
|
||||||
assert.Equal(t, 4, len(info.Remotes))
|
|
||||||
|
|
||||||
// Query and reference remote should get the first one (and not nil)
|
|
||||||
info, _ = m.QueryVpnIP(ip2int(net.ParseIP("10.128.1.1")))
|
|
||||||
assert.NotNil(t, info.remote)
|
|
||||||
|
|
||||||
// Promotion should ensure that the best remote is chosen (y)
|
|
||||||
info.ForcePromoteBest(myNets)
|
|
||||||
assert.True(t, myNet.Contains(udp2ip(info.remote)))
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHostmapdebug(t *testing.T) {
|
|
||||||
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
|
|
||||||
_, localToMe, _ := net.ParseCIDR("192.168.1.0/24")
|
|
||||||
preferredRanges := []*net.IPNet{localToMe}
|
|
||||||
m := NewHostMap("test", myNet, preferredRanges)
|
|
||||||
|
|
||||||
a := NewUDPAddrFromString("10.127.0.3:11111")
|
|
||||||
b := NewUDPAddrFromString("1.0.0.1:22222")
|
|
||||||
y := NewUDPAddrFromString("10.128.0.3:11111")
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), a)
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), b)
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
|
|
||||||
|
|
||||||
//t.Errorf("%s", m.DebugRemotes(1))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHostMap_rotateRemote(t *testing.T) {
|
|
||||||
h := HostInfo{}
|
|
||||||
// 0 remotes, no panic
|
|
||||||
h.rotateRemote()
|
|
||||||
assert.Nil(t, h.remote)
|
|
||||||
|
|
||||||
// 1 remote, no panic
|
|
||||||
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 1}), 0))
|
|
||||||
h.rotateRemote()
|
|
||||||
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 1}))
|
|
||||||
|
|
||||||
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 2}), 0))
|
|
||||||
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 3}), 0))
|
|
||||||
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 4}), 0))
|
|
||||||
|
|
||||||
// Rotate through those 3
|
|
||||||
h.rotateRemote()
|
|
||||||
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 2}))
|
|
||||||
|
|
||||||
h.rotateRemote()
|
|
||||||
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 3}))
|
|
||||||
|
|
||||||
h.rotateRemote()
|
|
||||||
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 4}))
|
|
||||||
|
|
||||||
// Finally, we should start over
|
|
||||||
h.rotateRemote()
|
|
||||||
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 1}))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHostmappromote2(b *testing.B) {
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
|
|
||||||
_, localToMe, _ := net.ParseCIDR("192.168.1.0/24")
|
|
||||||
preferredRanges := []*net.IPNet{localToMe}
|
|
||||||
m := NewHostMap("test", myNet, preferredRanges)
|
|
||||||
y := NewUDPAddrFromString("10.128.0.3:11111")
|
|
||||||
a := NewUDPAddrFromString("10.127.0.3:11111")
|
|
||||||
g := NewUDPAddrFromString("1.0.0.1:22222")
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), a)
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), g)
|
|
||||||
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
|
|
||||||
}
|
|
||||||
b.Errorf("hi")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|||||||
184
inside.go
184
inside.go
@@ -3,14 +3,17 @@ package nebula
|
|||||||
import (
|
import (
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/firewall"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket, nb, out []byte) {
|
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
err := newPacket(packet, false, fwPacket)
|
err := newPacket(packet, false, fwPacket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
|
f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -20,7 +23,7 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Ignore packets from self to self
|
// Ignore packets from self to self
|
||||||
if fwPacket.RemoteIP == f.lightHouse.myIp {
|
if fwPacket.RemoteIP == f.myVpnIp {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -30,6 +33,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket,
|
|||||||
}
|
}
|
||||||
|
|
||||||
hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
|
hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
|
||||||
|
if hostinfo == nil {
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("vpnIp", fwPacket.RemoteIP).
|
||||||
|
WithField("fwPacket", fwPacket).
|
||||||
|
Debugln("dropping outbound packet, vpnIp not in our CIDR or in unsafe routes")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
ci := hostinfo.ConnectionState
|
ci := hostinfo.ConnectionState
|
||||||
|
|
||||||
if ci.ready == false {
|
if ci.ready == false {
|
||||||
@@ -37,95 +48,132 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket,
|
|||||||
// the packet queue.
|
// the packet queue.
|
||||||
ci.queueLock.Lock()
|
ci.queueLock.Lock()
|
||||||
if !ci.ready {
|
if !ci.ready {
|
||||||
hostinfo.cachePacket(message, 0, packet, f.sendMessageNow)
|
hostinfo.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
|
||||||
ci.queueLock.Unlock()
|
ci.queueLock.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ci.queueLock.Unlock()
|
ci.queueLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
if !f.firewall.Drop(packet, *fwPacket, false, hostinfo, trustedCAs) {
|
dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.caPool, localCache)
|
||||||
f.send(message, 0, ci, hostinfo, hostinfo.remote, packet, nb, out)
|
if dropReason == nil {
|
||||||
if f.lightHouse != nil && *ci.messageCounter%5000 == 0 {
|
f.sendNoMetrics(header.Message, 0, ci, hostinfo, hostinfo.remote, packet, nb, out, q)
|
||||||
f.lightHouse.Query(fwPacket.RemoteIP, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if l.Level >= logrus.DebugLevel {
|
} else if f.l.Level >= logrus.DebugLevel {
|
||||||
hostinfo.logger().WithField("fwPacket", fwPacket).
|
hostinfo.logger(f.l).
|
||||||
|
WithField("fwPacket", fwPacket).
|
||||||
|
WithField("reason", dropReason).
|
||||||
Debugln("dropping outbound packet")
|
Debugln("dropping outbound packet")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) getOrHandshake(vpnIp uint32) *HostInfo {
|
// getOrHandshake returns nil if the vpnIp is not routable
|
||||||
if f.hostMap.vpnCIDR.Contains(int2ip(vpnIp)) == false {
|
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
|
||||||
|
//TODO: we can find contains without converting back to bytes
|
||||||
|
if f.hostMap.vpnCIDR.Contains(vpnIp.ToIP()) == false {
|
||||||
vpnIp = f.hostMap.queryUnsafeRoute(vpnIp)
|
vpnIp = f.hostMap.queryUnsafeRoute(vpnIp)
|
||||||
|
if vpnIp == 0 {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
hostinfo, err := f.hostMap.PromoteBestQueryVpnIP(vpnIp, f)
|
}
|
||||||
|
hostinfo, err := f.hostMap.PromoteBestQueryVpnIp(vpnIp, f)
|
||||||
|
|
||||||
//if err != nil || hostinfo.ConnectionState == nil {
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIP(vpnIp)
|
hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo = f.handshakeManager.AddVpnIP(vpnIp)
|
hostinfo = f.handshakeManager.AddVpnIp(vpnIp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ci := hostinfo.ConnectionState
|
ci := hostinfo.ConnectionState
|
||||||
|
|
||||||
if ci != nil && ci.eKey != nil && ci.ready {
|
if ci != nil && ci.eKey != nil && ci.ready {
|
||||||
return hostinfo
|
return hostinfo
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handshake is not ready, we need to grab the lock now before we start the handshake process
|
||||||
|
hostinfo.Lock()
|
||||||
|
defer hostinfo.Unlock()
|
||||||
|
|
||||||
|
// Double check, now that we have the lock
|
||||||
|
ci = hostinfo.ConnectionState
|
||||||
|
if ci != nil && ci.eKey != nil && ci.ready {
|
||||||
|
return hostinfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a connection state if we don't have one yet
|
||||||
if ci == nil {
|
if ci == nil {
|
||||||
// if we don't have a connection state, then send a handshake initiation
|
// Generate a PSK based on our config, this may be nil
|
||||||
ci = f.newConnectionState(true, noise.HandshakeIX, []byte{}, 0)
|
p, err := f.psk.MakeFor(vpnIp)
|
||||||
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
|
if err != nil {
|
||||||
//ci = f.newConnectionState(true, noise.HandshakeXX, []byte{}, 0)
|
//TODO: This isn't fatal specifically but it's pretty bad
|
||||||
|
f.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to get a PSK KDF")
|
||||||
|
return hostinfo
|
||||||
|
}
|
||||||
|
|
||||||
|
ci, err = f.newConnectionState(f.l, true, p)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to get a connection state")
|
||||||
|
return hostinfo
|
||||||
|
}
|
||||||
hostinfo.ConnectionState = ci
|
hostinfo.ConnectionState = ci
|
||||||
} else if ci.eKey == nil {
|
|
||||||
// if we don't have any state at all, create it
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we have already created the handshake packet, we don't want to call the function at all.
|
// If we have already created the handshake packet, we don't want to call the function at all.
|
||||||
if !hostinfo.HandshakeReady {
|
if !hostinfo.HandshakeReady {
|
||||||
ixHandshakeStage0(f, vpnIp, hostinfo)
|
ixHandshakeStage0(f, vpnIp, hostinfo)
|
||||||
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
|
|
||||||
//xx_handshakeStage0(f, ip, hostinfo)
|
// If this is a static host, we don't need to wait for the HostQueryReply
|
||||||
|
// We can trigger the handshake right now
|
||||||
|
if _, ok := f.lightHouse.staticList[vpnIp]; ok {
|
||||||
|
select {
|
||||||
|
case f.handshakeManager.trigger <- vpnIp:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return hostinfo
|
return hostinfo
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendMessageNow(t NebulaMessageType, st NebulaMessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
||||||
fp := &FirewallPacket{}
|
fp := &firewall.Packet{}
|
||||||
err := newPacket(p, false, fp)
|
err := newPacket(p, false, fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Warnf("error while parsing outgoing packet for firewall check; %v", err)
|
f.l.Warnf("error while parsing outgoing packet for firewall check; %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if packet is in outbound fw rules
|
// check if packet is in outbound fw rules
|
||||||
if f.firewall.Drop(p, *fp, false, hostInfo, trustedCAs) {
|
dropReason := f.firewall.Drop(p, *fp, false, hostInfo, f.caPool, nil)
|
||||||
l.WithField("fwPacket", fp).Debugln("dropping cached packet")
|
if dropReason != nil {
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("fwPacket", fp).
|
||||||
|
WithField("reason", dropReason).
|
||||||
|
Debugln("dropping cached packet")
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.send(message, st, hostInfo.ConnectionState, hostInfo, hostInfo.remote, p, nb, out)
|
f.sendNoMetrics(header.Message, st, hostInfo.ConnectionState, hostInfo, hostInfo.remote, p, nb, out, 0)
|
||||||
if f.lightHouse != nil && *hostInfo.ConnectionState.messageCounter%5000 == 0 {
|
|
||||||
f.lightHouse.Query(fp.RemoteIP, f)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
|
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
|
||||||
func (f *Interface) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) {
|
func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
|
||||||
hostInfo := f.getOrHandshake(vpnIp)
|
hostInfo := f.getOrHandshake(vpnIp)
|
||||||
|
if hostInfo == nil {
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("vpnIp", vpnIp).
|
||||||
|
Debugln("dropping SendMessageToVpnIp, vpnIp not in our CIDR or in unsafe routes")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if !hostInfo.ConnectionState.ready {
|
if !hostInfo.ConnectionState.ready {
|
||||||
// Because we might be sending stored packets, lock here to stop new things going to
|
// Because we might be sending stored packets, lock here to stop new things going to
|
||||||
// the packet queue.
|
// the packet queue.
|
||||||
hostInfo.ConnectionState.queueLock.Lock()
|
hostInfo.ConnectionState.queueLock.Lock()
|
||||||
if !hostInfo.ConnectionState.ready {
|
if !hostInfo.ConnectionState.ready {
|
||||||
hostInfo.cachePacket(t, st, p, f.sendMessageToVpnIp)
|
hostInfo.cachePacket(f.l, t, st, p, f.sendMessageToVpnIp, f.cachedPacketMetrics)
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
hostInfo.ConnectionState.queueLock.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -136,37 +184,16 @@ func (f *Interface) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubT
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
func (f *Interface) sendMessageToVpnIp(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
|
||||||
f.send(t, st, hostInfo.ConnectionState, hostInfo, hostInfo.remote, p, nb, out)
|
f.send(t, st, hostInfo.ConnectionState, hostInfo, hostInfo.remote, p, nb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendMessageToAll handles real ip:port lookup and sends to all known addresses for vpnIp
|
func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte) {
|
||||||
func (f *Interface) SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) {
|
f.messageMetrics.Tx(t, st, 1)
|
||||||
hostInfo := f.getOrHandshake(vpnIp)
|
f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
|
||||||
|
|
||||||
if hostInfo.ConnectionState.ready == false {
|
|
||||||
// Because we might be sending stored packets, lock here to stop new things going to
|
|
||||||
// the packet queue.
|
|
||||||
hostInfo.ConnectionState.queueLock.Lock()
|
|
||||||
if !hostInfo.ConnectionState.ready {
|
|
||||||
hostInfo.cachePacket(t, st, p, f.sendMessageToAll)
|
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hostInfo.ConnectionState.queueLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
f.sendMessageToAll(t, st, hostInfo, p, nb, out)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, hostInfo *HostInfo, p, nb, b []byte) {
|
func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
|
||||||
for _, r := range hostInfo.RemoteUDPAddrs() {
|
|
||||||
f.send(t, st, hostInfo.ConnectionState, hostInfo, r, p, nb, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Interface) send(t NebulaMessageType, st NebulaMessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udpAddr, p, nb, out []byte) {
|
|
||||||
if ci.eKey == nil {
|
if ci.eKey == nil {
|
||||||
//TODO: log warning
|
//TODO: log warning
|
||||||
return
|
return
|
||||||
@@ -175,31 +202,44 @@ func (f *Interface) send(t NebulaMessageType, st NebulaMessageSubType, ci *Conne
|
|||||||
var err error
|
var err error
|
||||||
//TODO: enable if we do more than 1 tun queue
|
//TODO: enable if we do more than 1 tun queue
|
||||||
//ci.writeLock.Lock()
|
//ci.writeLock.Lock()
|
||||||
c := atomic.AddUint64(ci.messageCounter, 1)
|
c := atomic.AddUint64(&ci.atomicMessageCounter, 1)
|
||||||
|
|
||||||
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
||||||
out = HeaderEncode(out, Version, uint8(t), uint8(st), hostinfo.remoteIndexId, c)
|
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
|
||||||
f.connectionManager.Out(hostinfo.hostId)
|
f.connectionManager.Out(hostinfo.vpnIp)
|
||||||
|
|
||||||
|
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
||||||
|
// all our IPs and enable a faster roaming.
|
||||||
|
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
|
||||||
|
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
|
||||||
|
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
|
||||||
|
f.lightHouse.QueryServer(hostinfo.vpnIp, f)
|
||||||
|
hostinfo.lastRebindCount = f.rebindCount
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
|
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
|
||||||
//TODO: see above note on lock
|
//TODO: see above note on lock
|
||||||
//ci.writeLock.Unlock()
|
//ci.writeLock.Unlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger().WithError(err).
|
hostinfo.logger(f.l).WithError(err).
|
||||||
WithField("udpAddr", remote).WithField("counter", c).
|
WithField("udpAddr", remote).WithField("counter", c).
|
||||||
WithField("attemptedCounter", ci.messageCounter).
|
WithField("attemptedCounter", c).
|
||||||
Error("Failed to encrypt outgoing packet")
|
Error("Failed to encrypt outgoing packet")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = f.outside.WriteTo(out, remote)
|
err = f.writers[q].WriteTo(out, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger().WithError(err).
|
hostinfo.logger(f.l).WithError(err).
|
||||||
WithField("udpAddr", remote).Error("Failed to write outgoing packet")
|
WithField("udpAddr", remote).Error("Failed to write outgoing packet")
|
||||||
}
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func isMulticast(ip uint32) bool {
|
func isMulticast(ip iputil.VpnIp) bool {
|
||||||
// Class D multicast
|
// Class D multicast
|
||||||
if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 {
|
if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 {
|
||||||
return true
|
return true
|
||||||
|
|||||||
241
interface.go
241
interface.go
@@ -1,19 +1,40 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/firewall"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
const mtu = 9001
|
const mtu = 9001
|
||||||
|
|
||||||
|
type Inside interface {
|
||||||
|
io.ReadWriteCloser
|
||||||
|
Activate() error
|
||||||
|
CidrNet() *net.IPNet
|
||||||
|
DeviceName() string
|
||||||
|
WriteRaw([]byte) error
|
||||||
|
NewMultiQueueReader() (io.ReadWriteCloser, error)
|
||||||
|
}
|
||||||
|
|
||||||
type InterfaceConfig struct {
|
type InterfaceConfig struct {
|
||||||
HostMap *HostMap
|
HostMap *HostMap
|
||||||
Outside *udpConn
|
Outside *udp.Conn
|
||||||
Inside *Tun
|
Inside Inside
|
||||||
certState *CertState
|
certState *CertState
|
||||||
Cipher string
|
Cipher string
|
||||||
Firewall *Firewall
|
Firewall *Firewall
|
||||||
@@ -24,13 +45,21 @@ type InterfaceConfig struct {
|
|||||||
pendingDeletionInterval int
|
pendingDeletionInterval int
|
||||||
DropLocalBroadcast bool
|
DropLocalBroadcast bool
|
||||||
DropMulticast bool
|
DropMulticast bool
|
||||||
UDPBatchSize int
|
routines int
|
||||||
|
MessageMetrics *MessageMetrics
|
||||||
|
version string
|
||||||
|
caPool *cert.NebulaCAPool
|
||||||
|
disconnectInvalid bool
|
||||||
|
psk *Psk
|
||||||
|
|
||||||
|
ConntrackCacheTimeout time.Duration
|
||||||
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
type Interface struct {
|
type Interface struct {
|
||||||
hostMap *HostMap
|
hostMap *HostMap
|
||||||
outside *udpConn
|
outside *udp.Conn
|
||||||
inside *Tun
|
inside Inside
|
||||||
certState *CertState
|
certState *CertState
|
||||||
cipher string
|
cipher string
|
||||||
firewall *Firewall
|
firewall *Firewall
|
||||||
@@ -39,18 +68,31 @@ type Interface struct {
|
|||||||
serveDns bool
|
serveDns bool
|
||||||
createTime time.Time
|
createTime time.Time
|
||||||
lightHouse *LightHouse
|
lightHouse *LightHouse
|
||||||
localBroadcast uint32
|
localBroadcast iputil.VpnIp
|
||||||
|
myVpnIp iputil.VpnIp
|
||||||
dropLocalBroadcast bool
|
dropLocalBroadcast bool
|
||||||
dropMulticast bool
|
dropMulticast bool
|
||||||
udpBatchSize int
|
routines int
|
||||||
|
caPool *cert.NebulaCAPool
|
||||||
|
disconnectInvalid bool
|
||||||
|
|
||||||
|
// rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse
|
||||||
|
rebindCount int8
|
||||||
version string
|
version string
|
||||||
|
|
||||||
metricRxRecvError metrics.Counter
|
conntrackCacheTimeout time.Duration
|
||||||
metricTxRecvError metrics.Counter
|
psk *Psk
|
||||||
|
writers []*udp.Conn
|
||||||
|
readers []io.ReadWriteCloser
|
||||||
|
|
||||||
metricHandshakes metrics.Histogram
|
metricHandshakes metrics.Histogram
|
||||||
|
messageMetrics *MessageMetrics
|
||||||
|
cachedPacketMetrics *cachedPacketMetrics
|
||||||
|
|
||||||
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewInterface(c *InterfaceConfig) (*Interface, error) {
|
func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
||||||
if c.Outside == nil {
|
if c.Outside == nil {
|
||||||
return nil, errors.New("no outside connection")
|
return nil, errors.New("no outside connection")
|
||||||
}
|
}
|
||||||
@@ -64,6 +106,8 @@ func NewInterface(c *InterfaceConfig) (*Interface, error) {
|
|||||||
return nil, errors.New("no firewall rules")
|
return nil, errors.New("no firewall rules")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
myVpnIp := iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].IP)
|
||||||
|
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
hostMap: c.HostMap,
|
hostMap: c.HostMap,
|
||||||
outside: c.Outside,
|
outside: c.Outside,
|
||||||
@@ -75,112 +119,147 @@ func NewInterface(c *InterfaceConfig) (*Interface, error) {
|
|||||||
handshakeManager: c.HandshakeManager,
|
handshakeManager: c.HandshakeManager,
|
||||||
createTime: time.Now(),
|
createTime: time.Now(),
|
||||||
lightHouse: c.lightHouse,
|
lightHouse: c.lightHouse,
|
||||||
localBroadcast: ip2int(c.certState.certificate.Details.Ips[0].IP) | ^ip2int(c.certState.certificate.Details.Ips[0].Mask),
|
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].Mask),
|
||||||
dropLocalBroadcast: c.DropLocalBroadcast,
|
dropLocalBroadcast: c.DropLocalBroadcast,
|
||||||
dropMulticast: c.DropMulticast,
|
dropMulticast: c.DropMulticast,
|
||||||
udpBatchSize: c.UDPBatchSize,
|
routines: c.routines,
|
||||||
|
version: c.version,
|
||||||
|
writers: make([]*udp.Conn, c.routines),
|
||||||
|
readers: make([]io.ReadWriteCloser, c.routines),
|
||||||
|
caPool: c.caPool,
|
||||||
|
disconnectInvalid: c.disconnectInvalid,
|
||||||
|
psk: c.psk,
|
||||||
|
myVpnIp: myVpnIp,
|
||||||
|
|
||||||
|
conntrackCacheTimeout: c.ConntrackCacheTimeout,
|
||||||
|
|
||||||
metricRxRecvError: metrics.GetOrRegisterCounter("messages.rx.recv_error", nil),
|
|
||||||
metricTxRecvError: metrics.GetOrRegisterCounter("messages.tx.recv_error", nil),
|
|
||||||
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
||||||
|
messageMetrics: c.MessageMetrics,
|
||||||
|
cachedPacketMetrics: &cachedPacketMetrics{
|
||||||
|
sent: metrics.GetOrRegisterCounter("hostinfo.cached_packets.sent", nil),
|
||||||
|
dropped: metrics.GetOrRegisterCounter("hostinfo.cached_packets.dropped", nil),
|
||||||
|
},
|
||||||
|
|
||||||
|
l: c.l,
|
||||||
}
|
}
|
||||||
|
|
||||||
ifce.connectionManager = newConnectionManager(ifce, c.checkInterval, c.pendingDeletionInterval)
|
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval)
|
||||||
|
|
||||||
return ifce, nil
|
return ifce, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) Run(tunRoutines, udpRoutines int, buildVersion string) {
|
// activate creates the interface on the host. After the interface is created, any
|
||||||
|
// other services that want to bind listeners to its IP may do so successfully. However,
|
||||||
|
// the interface isn't going to process anything until run() is called.
|
||||||
|
func (f *Interface) activate() {
|
||||||
// actually turn on tun dev
|
// actually turn on tun dev
|
||||||
if err := f.inside.Activate(); err != nil {
|
|
||||||
l.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f.version = buildVersion
|
|
||||||
addr, err := f.outside.LocalAddr()
|
addr, err := f.outside.LocalAddr()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Failed to get udp listen address")
|
f.l.WithError(err).Error("Failed to get udp listen address")
|
||||||
}
|
}
|
||||||
|
|
||||||
l.WithField("interface", f.inside.Device).WithField("network", f.inside.Cidr.String()).
|
f.l.WithField("interface", f.inside.DeviceName()).WithField("network", f.inside.CidrNet().String()).
|
||||||
WithField("build", buildVersion).WithField("udpAddr", addr).
|
WithField("build", f.version).WithField("udpAddr", addr).
|
||||||
Info("Nebula interface is active")
|
Info("Nebula interface is active")
|
||||||
|
|
||||||
|
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
|
||||||
|
|
||||||
|
// Prepare n tun queues
|
||||||
|
var reader io.ReadWriteCloser = f.inside
|
||||||
|
for i := 0; i < f.routines; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
reader, err = f.inside.NewMultiQueueReader()
|
||||||
|
if err != nil {
|
||||||
|
f.l.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.readers[i] = reader
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := f.inside.Activate(); err != nil {
|
||||||
|
f.l.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Interface) run() {
|
||||||
// Launch n queues to read packets from udp
|
// Launch n queues to read packets from udp
|
||||||
for i := 0; i < udpRoutines; i++ {
|
for i := 0; i < f.routines; i++ {
|
||||||
go f.listenOut(i)
|
go f.listenOut(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Launch n queues to read packets from tun dev
|
// Launch n queues to read packets from tun dev
|
||||||
for i := 0; i < tunRoutines; i++ {
|
for i := 0; i < f.routines; i++ {
|
||||||
go f.listenIn(i)
|
go f.listenIn(f.readers[i], i)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) listenOut(i int) {
|
func (f *Interface) listenOut(i int) {
|
||||||
//TODO: handle error
|
runtime.LockOSThread()
|
||||||
addr, err := f.outside.LocalAddr()
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).Error("failed to discover udp listening address")
|
|
||||||
}
|
|
||||||
|
|
||||||
var li *udpConn
|
var li *udp.Conn
|
||||||
|
// TODO clean this up with a coherent interface for each outside connection
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
//TODO: handle error
|
li = f.writers[i]
|
||||||
li, err = NewListener(udp2ip(addr).String(), int(addr.Port), i > 0)
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).Error("failed to make a new udp listener")
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
li = f.outside
|
li = f.outside
|
||||||
}
|
}
|
||||||
|
|
||||||
li.ListenOut(f)
|
lhh := f.lightHouse.NewRequestHandler()
|
||||||
|
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||||
|
li.ListenOut(f.readOutsidePackets, lhh.HandleRequest, conntrackCache, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) listenIn(i int) {
|
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
||||||
|
runtime.LockOSThread()
|
||||||
|
|
||||||
packet := make([]byte, mtu)
|
packet := make([]byte, mtu)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
fwPacket := &FirewallPacket{}
|
fwPacket := &firewall.Packet{}
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
|
|
||||||
|
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
n, err := f.inside.Read(packet)
|
n, err := reader.Read(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Error while reading outbound packet")
|
f.l.WithError(err).Error("Error while reading outbound packet")
|
||||||
// This only seems to happen when something fatal happens to the fd, so exit.
|
// This only seems to happen when something fatal happens to the fd, so exit.
|
||||||
os.Exit(2)
|
os.Exit(2)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.consumeInsidePacket(packet[:n], fwPacket, nb, out)
|
f.consumeInsidePacket(packet[:n], fwPacket, nb, out, i, conntrackCache.Get(f.l))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) RegisterConfigChangeCallbacks(c *Config) {
|
func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
|
||||||
c.RegisterReloadCallback(f.reloadCA)
|
c.RegisterReloadCallback(f.reloadCA)
|
||||||
c.RegisterReloadCallback(f.reloadCertKey)
|
c.RegisterReloadCallback(f.reloadCertKey)
|
||||||
c.RegisterReloadCallback(f.reloadFirewall)
|
c.RegisterReloadCallback(f.reloadFirewall)
|
||||||
c.RegisterReloadCallback(f.outside.reloadConfig)
|
for _, udpConn := range f.writers {
|
||||||
|
c.RegisterReloadCallback(udpConn.ReloadConfig)
|
||||||
|
}
|
||||||
|
c.RegisterReloadCallback(f.reloadPSKs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) reloadCA(c *Config) {
|
func (f *Interface) reloadCA(c *config.C) {
|
||||||
// reload and check regardless
|
// reload and check regardless
|
||||||
// todo: need mutex?
|
// todo: need mutex?
|
||||||
newCAs, err := loadCAFromConfig(c)
|
newCAs, err := loadCAFromConfig(f.l, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Could not refresh trusted CA certificates")
|
f.l.WithError(err).Error("Could not refresh trusted CA certificates")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
trustedCAs = newCAs
|
f.caPool = newCAs
|
||||||
l.WithField("fingerprints", trustedCAs.GetFingerprints()).Info("Trusted CA certificates refreshed")
|
f.l.WithField("fingerprints", f.caPool.GetFingerprints()).Info("Trusted CA certificates refreshed")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) reloadCertKey(c *Config) {
|
func (f *Interface) reloadCertKey(c *config.C) {
|
||||||
// reload and check in all cases
|
// reload and check in all cases
|
||||||
cs, err := NewCertStateFromConfig(c)
|
cs, err := NewCertStateFromConfig(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Could not refresh client cert")
|
f.l.WithError(err).Error("Could not refresh client cert")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -188,40 +267,80 @@ func (f *Interface) reloadCertKey(c *Config) {
|
|||||||
oldIPs := f.certState.certificate.Details.Ips
|
oldIPs := f.certState.certificate.Details.Ips
|
||||||
newIPs := cs.certificate.Details.Ips
|
newIPs := cs.certificate.Details.Ips
|
||||||
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
|
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
|
||||||
l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
|
f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.certState = cs
|
f.certState = cs
|
||||||
l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
|
f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) reloadFirewall(c *Config) {
|
func (f *Interface) reloadFirewall(c *config.C) {
|
||||||
//TODO: need to trigger/detect if the certificate changed too
|
//TODO: need to trigger/detect if the certificate changed too
|
||||||
if c.HasChanged("firewall") == false {
|
if c.HasChanged("firewall") == false {
|
||||||
l.Debug("No firewall config change detected")
|
f.l.Debug("No firewall config change detected")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
fw, err := NewFirewallFromConfig(f.certState.certificate, c)
|
fw, err := NewFirewallFromConfig(f.l, f.certState.certificate, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Error while creating firewall during reload")
|
f.l.WithError(err).Error("Error while creating firewall during reload")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
oldFw := f.firewall
|
oldFw := f.firewall
|
||||||
|
conntrack := oldFw.Conntrack
|
||||||
|
conntrack.Lock()
|
||||||
|
defer conntrack.Unlock()
|
||||||
|
|
||||||
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
// If rulesVersion is back to zero, we have wrapped all the way around. Be
|
||||||
|
// safe and just reset conntrack in this case.
|
||||||
|
if fw.rulesVersion == 0 {
|
||||||
|
f.l.WithField("firewallHash", fw.GetRuleHash()).
|
||||||
|
WithField("oldFirewallHash", oldFw.GetRuleHash()).
|
||||||
|
WithField("rulesVersion", fw.rulesVersion).
|
||||||
|
Warn("firewall rulesVersion has overflowed, resetting conntrack")
|
||||||
|
} else {
|
||||||
|
fw.Conntrack = conntrack
|
||||||
|
}
|
||||||
|
|
||||||
f.firewall = fw
|
f.firewall = fw
|
||||||
|
|
||||||
oldFw.Destroy()
|
oldFw.Destroy()
|
||||||
l.WithField("firewallHash", fw.GetRuleHash()).
|
f.l.WithField("firewallHash", fw.GetRuleHash()).
|
||||||
WithField("oldFirewallHash", oldFw.GetRuleHash()).
|
WithField("oldFirewallHash", oldFw.GetRuleHash()).
|
||||||
|
WithField("rulesVersion", fw.rulesVersion).
|
||||||
Info("New firewall has been installed")
|
Info("New firewall has been installed")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) emitStats(i time.Duration) {
|
func (f *Interface) reloadPSKs(c *config.C) {
|
||||||
|
psk, err := NewPskFromConfig(c, f.myVpnIp)
|
||||||
|
if err != nil {
|
||||||
|
f.l.WithError(err).Error("Error while reloading PSKs")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&f.psk)), unsafe.Pointer(psk))
|
||||||
|
|
||||||
|
f.l.WithField("pskMode", psk.mode).WithField("keysLen", len(psk.Cache)).
|
||||||
|
Info("New psks are in use")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
||||||
ticker := time.NewTicker(i)
|
ticker := time.NewTicker(i)
|
||||||
for range ticker.C {
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
udpStats := udp.NewUDPStatsEmitter(f.writers)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
f.firewall.EmitStats()
|
f.firewall.EmitStats()
|
||||||
f.handshakeManager.EmitStats()
|
f.handshakeManager.EmitStats()
|
||||||
|
udpStats()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
66
iputil/util.go
Normal file
66
iputil/util.go
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
package iputil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
type VpnIp uint32
|
||||||
|
|
||||||
|
const maxIPv4StringLen = len("255.255.255.255")
|
||||||
|
|
||||||
|
func (ip VpnIp) String() string {
|
||||||
|
b := make([]byte, maxIPv4StringLen)
|
||||||
|
|
||||||
|
n := ubtoa(b, 0, byte(ip>>24))
|
||||||
|
b[n] = '.'
|
||||||
|
n++
|
||||||
|
|
||||||
|
n += ubtoa(b, n, byte(ip>>16&255))
|
||||||
|
b[n] = '.'
|
||||||
|
n++
|
||||||
|
|
||||||
|
n += ubtoa(b, n, byte(ip>>8&255))
|
||||||
|
b[n] = '.'
|
||||||
|
n++
|
||||||
|
|
||||||
|
n += ubtoa(b, n, byte(ip&255))
|
||||||
|
return string(b[:n])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ip VpnIp) MarshalJSON() ([]byte, error) {
|
||||||
|
return []byte(fmt.Sprintf("\"%s\"", ip.String())), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ip VpnIp) ToIP() net.IP {
|
||||||
|
nip := make(net.IP, 4)
|
||||||
|
binary.BigEndian.PutUint32(nip, uint32(ip))
|
||||||
|
return nip
|
||||||
|
}
|
||||||
|
|
||||||
|
func Ip2VpnIp(ip []byte) VpnIp {
|
||||||
|
if len(ip) == 16 {
|
||||||
|
return VpnIp(binary.BigEndian.Uint32(ip[12:16]))
|
||||||
|
}
|
||||||
|
return VpnIp(binary.BigEndian.Uint32(ip))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ubtoa encodes the string form of the integer v to dst[start:] and
|
||||||
|
// returns the number of bytes written to dst. The caller must ensure
|
||||||
|
// that dst has sufficient length.
|
||||||
|
func ubtoa(dst []byte, start int, v byte) int {
|
||||||
|
if v < 10 {
|
||||||
|
dst[start] = v + '0'
|
||||||
|
return 1
|
||||||
|
} else if v < 100 {
|
||||||
|
dst[start+1] = v%10 + '0'
|
||||||
|
dst[start] = v/10 + '0'
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
|
||||||
|
dst[start+2] = v%10 + '0'
|
||||||
|
dst[start+1] = (v/10)%10 + '0'
|
||||||
|
dst[start] = v/100 + '0'
|
||||||
|
return 3
|
||||||
|
}
|
||||||
17
iputil/util_test.go
Normal file
17
iputil/util_test.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package iputil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestVpnIp_String(t *testing.T) {
|
||||||
|
assert.Equal(t, "255.255.255.255", Ip2VpnIp(net.ParseIP("255.255.255.255")).String())
|
||||||
|
assert.Equal(t, "1.255.255.255", Ip2VpnIp(net.ParseIP("1.255.255.255")).String())
|
||||||
|
assert.Equal(t, "1.1.255.255", Ip2VpnIp(net.ParseIP("1.1.255.255")).String())
|
||||||
|
assert.Equal(t, "1.1.1.255", Ip2VpnIp(net.ParseIP("1.1.1.255")).String())
|
||||||
|
assert.Equal(t, "1.1.1.1", Ip2VpnIp(net.ParseIP("1.1.1.1")).String())
|
||||||
|
assert.Equal(t, "0.0.0.0", Ip2VpnIp(net.ParseIP("0.0.0.0")).String())
|
||||||
|
}
|
||||||
703
lighthouse.go
703
lighthouse.go
@@ -1,61 +1,89 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/rcrowley/go-metrics"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//TODO: if a lighthouse doesn't have an answer, clients AGGRESSIVELY REQUERY.. why? handshake manager and/or getOrHandshake?
|
||||||
|
//TODO: nodes are roaming lighthouses, this is bad. How are they learning?
|
||||||
|
|
||||||
|
var ErrHostNotKnown = errors.New("host not known")
|
||||||
|
|
||||||
type LightHouse struct {
|
type LightHouse struct {
|
||||||
|
//TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
|
||||||
sync.RWMutex //Because we concurrently read and write to our maps
|
sync.RWMutex //Because we concurrently read and write to our maps
|
||||||
amLighthouse bool
|
amLighthouse bool
|
||||||
myIp uint32
|
myVpnIp iputil.VpnIp
|
||||||
punchConn *udpConn
|
myVpnZeros iputil.VpnIp
|
||||||
|
punchConn *udp.Conn
|
||||||
|
|
||||||
// Local cache of answers from light houses
|
// Local cache of answers from light houses
|
||||||
addrMap map[uint32][]udpAddr
|
// map of vpn Ip to answers
|
||||||
|
addrMap map[iputil.VpnIp]*RemoteList
|
||||||
|
|
||||||
// filters remote addresses allowed for each host
|
// filters remote addresses allowed for each host
|
||||||
// - When we are a lighthouse, this filters what addresses we store and
|
// - When we are a lighthouse, this filters what addresses we store and
|
||||||
// respond with.
|
// respond with.
|
||||||
// - When we are not a lighthouse, this filters which addresses we accept
|
// - When we are not a lighthouse, this filters which addresses we accept
|
||||||
// from lighthouses.
|
// from lighthouses.
|
||||||
remoteAllowList *AllowList
|
remoteAllowList *RemoteAllowList
|
||||||
|
|
||||||
// filters local addresses that we advertise to lighthouses
|
// filters local addresses that we advertise to lighthouses
|
||||||
localAllowList *AllowList
|
localAllowList *LocalAllowList
|
||||||
|
|
||||||
|
// used to trigger the HandshakeManager when we receive HostQueryReply
|
||||||
|
handshakeTrigger chan<- iputil.VpnIp
|
||||||
|
|
||||||
// staticList exists to avoid having a bool in each addrMap entry
|
// staticList exists to avoid having a bool in each addrMap entry
|
||||||
// since static should be rare
|
// since static should be rare
|
||||||
staticList map[uint32]struct{}
|
staticList map[iputil.VpnIp]struct{}
|
||||||
lighthouses map[uint32]struct{}
|
lighthouses map[iputil.VpnIp]struct{}
|
||||||
interval int
|
interval int
|
||||||
nebulaPort int
|
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
|
||||||
punchBack bool
|
punchBack bool
|
||||||
punchDelay time.Duration
|
punchDelay time.Duration
|
||||||
|
|
||||||
|
metrics *MessageMetrics
|
||||||
|
metricHolepunchTx metrics.Counter
|
||||||
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
type EncWriter interface {
|
func NewLightHouse(l *logrus.Logger, amLighthouse bool, myVpnIpNet *net.IPNet, ips []iputil.VpnIp, interval int, nebulaPort uint32, pc *udp.Conn, punchBack bool, punchDelay time.Duration, metricsEnabled bool) *LightHouse {
|
||||||
SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte)
|
ones, _ := myVpnIpNet.Mask.Size()
|
||||||
SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLightHouse(amLighthouse bool, myIp uint32, ips []uint32, interval int, nebulaPort int, pc *udpConn, punchBack bool, punchDelay time.Duration) *LightHouse {
|
|
||||||
h := LightHouse{
|
h := LightHouse{
|
||||||
amLighthouse: amLighthouse,
|
amLighthouse: amLighthouse,
|
||||||
myIp: myIp,
|
myVpnIp: iputil.Ip2VpnIp(myVpnIpNet.IP),
|
||||||
addrMap: make(map[uint32][]udpAddr),
|
myVpnZeros: iputil.VpnIp(32 - ones),
|
||||||
|
addrMap: make(map[iputil.VpnIp]*RemoteList),
|
||||||
nebulaPort: nebulaPort,
|
nebulaPort: nebulaPort,
|
||||||
lighthouses: make(map[uint32]struct{}),
|
lighthouses: make(map[iputil.VpnIp]struct{}),
|
||||||
staticList: make(map[uint32]struct{}),
|
staticList: make(map[iputil.VpnIp]struct{}),
|
||||||
interval: interval,
|
interval: interval,
|
||||||
punchConn: pc,
|
punchConn: pc,
|
||||||
punchBack: punchBack,
|
punchBack: punchBack,
|
||||||
punchDelay: punchDelay,
|
punchDelay: punchDelay,
|
||||||
|
l: l,
|
||||||
|
}
|
||||||
|
|
||||||
|
if metricsEnabled {
|
||||||
|
h.metrics = newLighthouseMetrics()
|
||||||
|
|
||||||
|
h.metricHolepunchTx = metrics.GetOrRegisterCounter("messages.tx.holepunch", nil)
|
||||||
|
} else {
|
||||||
|
h.metricHolepunchTx = metrics.NilCounter{}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ip := range ips {
|
for _, ip := range ips {
|
||||||
@@ -65,14 +93,14 @@ func NewLightHouse(amLighthouse bool, myIp uint32, ips []uint32, interval int, n
|
|||||||
return &h
|
return &h
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) SetRemoteAllowList(allowList *AllowList) {
|
func (lh *LightHouse) SetRemoteAllowList(allowList *RemoteAllowList) {
|
||||||
lh.Lock()
|
lh.Lock()
|
||||||
defer lh.Unlock()
|
defer lh.Unlock()
|
||||||
|
|
||||||
lh.remoteAllowList = allowList
|
lh.remoteAllowList = allowList
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) SetLocalAllowList(allowList *AllowList) {
|
func (lh *LightHouse) SetLocalAllowList(allowList *LocalAllowList) {
|
||||||
lh.Lock()
|
lh.Lock()
|
||||||
defer lh.Unlock()
|
defer lh.Unlock()
|
||||||
|
|
||||||
@@ -82,45 +110,16 @@ func (lh *LightHouse) SetLocalAllowList(allowList *AllowList) {
|
|||||||
func (lh *LightHouse) ValidateLHStaticEntries() error {
|
func (lh *LightHouse) ValidateLHStaticEntries() error {
|
||||||
for lhIP, _ := range lh.lighthouses {
|
for lhIP, _ := range lh.lighthouses {
|
||||||
if _, ok := lh.staticList[lhIP]; !ok {
|
if _, ok := lh.staticList[lhIP]; !ok {
|
||||||
return fmt.Errorf("Lighthouse %s does not have a static_host_map entry", IntIp(lhIP))
|
return fmt.Errorf("Lighthouse %s does not have a static_host_map entry", lhIP)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) Query(ip uint32, f EncWriter) ([]udpAddr, error) {
|
func (lh *LightHouse) Query(ip iputil.VpnIp, f udp.EncWriter) *RemoteList {
|
||||||
if !lh.IsLighthouseIP(ip) {
|
if !lh.IsLighthouseIP(ip) {
|
||||||
lh.QueryServer(ip, f)
|
lh.QueryServer(ip, f)
|
||||||
}
|
}
|
||||||
lh.RLock()
|
|
||||||
if v, ok := lh.addrMap[ip]; ok {
|
|
||||||
lh.RUnlock()
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
lh.RUnlock()
|
|
||||||
return nil, fmt.Errorf("host %s not known, queries sent to lighthouses", IntIp(ip))
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is asynchronous so no reply should be expected
|
|
||||||
func (lh *LightHouse) QueryServer(ip uint32, f EncWriter) {
|
|
||||||
if !lh.amLighthouse {
|
|
||||||
// Send a query to the lighthouses and hope for the best next time
|
|
||||||
query, err := proto.Marshal(NewLhQueryByInt(ip))
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(ip)).Error("Failed to marshal lighthouse query payload")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
nb := make([]byte, 12, 12)
|
|
||||||
out := make([]byte, mtu)
|
|
||||||
for n := range lh.lighthouses {
|
|
||||||
f.SendMessageToVpnIp(lightHouse, 0, n, query, nb, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query our local lighthouse cached results
|
|
||||||
func (lh *LightHouse) QueryCache(ip uint32) []udpAddr {
|
|
||||||
lh.RLock()
|
lh.RLock()
|
||||||
if v, ok := lh.addrMap[ip]; ok {
|
if v, ok := lh.addrMap[ip]; ok {
|
||||||
lh.RUnlock()
|
lh.RUnlock()
|
||||||
@@ -130,279 +129,525 @@ func (lh *LightHouse) QueryCache(ip uint32) []udpAddr {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) DeleteVpnIP(vpnIP uint32) {
|
// This is asynchronous so no reply should be expected
|
||||||
|
func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f udp.EncWriter) {
|
||||||
|
if lh.amLighthouse {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if lh.IsLighthouseIP(ip) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a query to the lighthouses and hope for the best next time
|
||||||
|
query, err := proto.Marshal(NewLhQueryByInt(ip))
|
||||||
|
if err != nil {
|
||||||
|
lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lh.metricTx(NebulaMeta_HostQuery, int64(len(lh.lighthouses)))
|
||||||
|
nb := make([]byte, 12, 12)
|
||||||
|
out := make([]byte, mtu)
|
||||||
|
for n := range lh.lighthouses {
|
||||||
|
f.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
|
||||||
|
lh.RLock()
|
||||||
|
if v, ok := lh.addrMap[ip]; ok {
|
||||||
|
lh.RUnlock()
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
lh.RUnlock()
|
||||||
|
|
||||||
|
lh.Lock()
|
||||||
|
defer lh.Unlock()
|
||||||
|
// Add an entry if we don't already have one
|
||||||
|
return lh.unlockedGetRemoteList(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
|
||||||
|
// details from the remote list. It looks for a hit in the addrMap and a hit in the RemoteList under the owner vpnIp
|
||||||
|
// If one is found then f() is called with proper locking, f() must return result of n.MarshalTo()
|
||||||
|
func (lh *LightHouse) queryAndPrepMessage(vpnIp iputil.VpnIp, f func(*cache) (int, error)) (bool, int, error) {
|
||||||
|
lh.RLock()
|
||||||
|
// Do we have an entry in the main cache?
|
||||||
|
if v, ok := lh.addrMap[vpnIp]; ok {
|
||||||
|
// Swap lh lock for remote list lock
|
||||||
|
v.RLock()
|
||||||
|
defer v.RUnlock()
|
||||||
|
|
||||||
|
lh.RUnlock()
|
||||||
|
|
||||||
|
// vpnIp should also be the owner here since we are a lighthouse.
|
||||||
|
c := v.cache[vpnIp]
|
||||||
|
// Make sure we have
|
||||||
|
if c != nil {
|
||||||
|
n, err := f(c)
|
||||||
|
return true, n, err
|
||||||
|
}
|
||||||
|
return false, 0, nil
|
||||||
|
}
|
||||||
|
lh.RUnlock()
|
||||||
|
return false, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
|
||||||
// First we check the static mapping
|
// First we check the static mapping
|
||||||
// and do nothing if it is there
|
// and do nothing if it is there
|
||||||
if _, ok := lh.staticList[vpnIP]; ok {
|
if _, ok := lh.staticList[vpnIp]; ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
lh.Lock()
|
lh.Lock()
|
||||||
//l.Debugln(lh.addrMap)
|
//l.Debugln(lh.addrMap)
|
||||||
delete(lh.addrMap, vpnIP)
|
delete(lh.addrMap, vpnIp)
|
||||||
l.Debugf("deleting %s from lighthouse.", IntIp(vpnIP))
|
|
||||||
|
if lh.l.Level >= logrus.DebugLevel {
|
||||||
|
lh.l.Debugf("deleting %s from lighthouse.", vpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
lh.Unlock()
|
lh.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) AddRemote(vpnIP uint32, toIp *udpAddr, static bool) {
|
// AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
|
||||||
// First we check if the sender thinks this is a static entry
|
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
|
||||||
// and do nothing if it is not, but should be considered static
|
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
|
||||||
if static == false {
|
func (lh *LightHouse) AddStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr) {
|
||||||
if _, ok := lh.staticList[vpnIP]; ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lh.Lock()
|
lh.Lock()
|
||||||
for _, v := range lh.addrMap[vpnIP] {
|
am := lh.unlockedGetRemoteList(vpnIp)
|
||||||
if v.Equals(toIp) {
|
am.Lock()
|
||||||
|
defer am.Unlock()
|
||||||
lh.Unlock()
|
lh.Unlock()
|
||||||
|
|
||||||
|
if ipv4 := toAddr.IP.To4(); ipv4 != nil {
|
||||||
|
to := NewIp4AndPort(ipv4, uint32(toAddr.Port))
|
||||||
|
if !lh.unlockedShouldAddV4(vpnIp, to) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
am.unlockedPrependV4(lh.myVpnIp, to)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
to := NewIp6AndPort(toAddr.IP, uint32(toAddr.Port))
|
||||||
|
if !lh.unlockedShouldAddV6(vpnIp, to) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
am.unlockedPrependV6(lh.myVpnIp, to)
|
||||||
}
|
}
|
||||||
|
|
||||||
allow := lh.remoteAllowList.Allow(udp2ipInt(toIp))
|
// Mark it as static
|
||||||
l.WithField("remoteIp", toIp).WithField("allow", allow).Debug("remoteAllowList.Allow")
|
lh.staticList[vpnIp] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedGetRemoteList assumes you have the lh lock
|
||||||
|
func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
|
||||||
|
am, ok := lh.addrMap[vpnIp]
|
||||||
|
if !ok {
|
||||||
|
am = NewRemoteList()
|
||||||
|
lh.addrMap[vpnIp] = am
|
||||||
|
}
|
||||||
|
return am
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedShouldAddV4 checks if to is allowed by our allow list
|
||||||
|
func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
|
||||||
|
allow := lh.remoteAllowList.AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
|
||||||
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
|
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.VpnIp(to.Ip)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedShouldAddV6 checks if to is allowed by our allow list
|
||||||
|
func (lh *LightHouse) unlockedShouldAddV6(vpnIp iputil.VpnIp, to *Ip6AndPort) bool {
|
||||||
|
allow := lh.remoteAllowList.AllowIpV6(vpnIp, to.Hi, to.Lo)
|
||||||
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
|
lh.l.WithField("remoteIp", lhIp6ToIp(to)).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't check our vpn network here because nebula does not support ipv6 on the inside
|
||||||
if !allow {
|
if !allow {
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
//l.Debugf("Adding reply of %s as %s\n", IntIp(vpnIP), toIp)
|
return true
|
||||||
if static {
|
|
||||||
lh.staticList[vpnIP] = struct{}{}
|
|
||||||
}
|
|
||||||
lh.addrMap[vpnIP] = append(lh.addrMap[vpnIP], *toIp)
|
|
||||||
lh.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) AddRemoteAndReset(vpnIP uint32, toIp *udpAddr) {
|
func lhIp6ToIp(v *Ip6AndPort) net.IP {
|
||||||
if lh.amLighthouse {
|
ip := make(net.IP, 16)
|
||||||
lh.DeleteVpnIP(vpnIP)
|
binary.BigEndian.PutUint64(ip[:8], v.Hi)
|
||||||
lh.AddRemote(vpnIP, toIp, false)
|
binary.BigEndian.PutUint64(ip[8:], v.Lo)
|
||||||
}
|
return ip
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) IsLighthouseIP(vpnIP uint32) bool {
|
func (lh *LightHouse) IsLighthouseIP(vpnIp iputil.VpnIp) bool {
|
||||||
if _, ok := lh.lighthouses[vpnIP]; ok {
|
if _, ok := lh.lighthouses[vpnIp]; ok {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Quick generators for protobuf
|
func NewLhQueryByInt(VpnIp iputil.VpnIp) *NebulaMeta {
|
||||||
|
|
||||||
func NewLhQueryByIpString(VpnIp string) *NebulaMeta {
|
|
||||||
return NewLhQueryByInt(ip2int(net.ParseIP(VpnIp)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLhQueryByInt(VpnIp uint32) *NebulaMeta {
|
|
||||||
return &NebulaMeta{
|
return &NebulaMeta{
|
||||||
Type: NebulaMeta_HostQuery,
|
Type: NebulaMeta_HostQuery,
|
||||||
Details: &NebulaMetaDetails{
|
Details: &NebulaMetaDetails{
|
||||||
VpnIp: VpnIp,
|
VpnIp: uint32(VpnIp),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewLhWhoami() *NebulaMeta {
|
func NewIp4AndPort(ip net.IP, port uint32) *Ip4AndPort {
|
||||||
return &NebulaMeta{
|
ipp := Ip4AndPort{Port: port}
|
||||||
Type: NebulaMeta_HostWhoami,
|
ipp.Ip = uint32(iputil.Ip2VpnIp(ip))
|
||||||
Details: &NebulaMetaDetails{},
|
return &ipp
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
|
||||||
|
return &Ip6AndPort{
|
||||||
|
Hi: binary.BigEndian.Uint64(ip[:8]),
|
||||||
|
Lo: binary.BigEndian.Uint64(ip[8:]),
|
||||||
|
Port: port,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// End Quick generators for protobuf
|
func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr {
|
||||||
|
ip := ipp.Ip
|
||||||
func NewIpAndPortFromUDPAddr(addr udpAddr) *IpAndPort {
|
return udp.NewAddr(
|
||||||
return &IpAndPort{Ip: udp2ipInt(&addr), Port: uint32(addr.Port)}
|
net.IPv4(byte(ip&0xff000000>>24), byte(ip&0x00ff0000>>16), byte(ip&0x0000ff00>>8), byte(ip&0x000000ff)),
|
||||||
|
uint16(ipp.Port),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewIpAndPortsFromNetIps(ips []udpAddr) *[]*IpAndPort {
|
func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
|
||||||
var iap []*IpAndPort
|
return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
|
||||||
for _, e := range ips {
|
|
||||||
// Only add IPs that aren't my VPN/tun IP
|
|
||||||
iap = append(iap, NewIpAndPortFromUDPAddr(e))
|
|
||||||
}
|
|
||||||
return &iap
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) LhUpdateWorker(f EncWriter) {
|
func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) {
|
||||||
if lh.amLighthouse || lh.interval == 0 {
|
if lh.amLighthouse || lh.interval == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for {
|
clockSource := time.NewTicker(time.Second * time.Duration(lh.interval))
|
||||||
ipp := []*IpAndPort{}
|
defer clockSource.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
lh.SendUpdate(f)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-clockSource.C:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
|
||||||
|
var v4 []*Ip4AndPort
|
||||||
|
var v6 []*Ip6AndPort
|
||||||
|
|
||||||
|
for _, e := range *localIps(lh.l, lh.localAllowList) {
|
||||||
|
if ip4 := e.To4(); ip4 != nil && ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.Ip2VpnIp(ip4)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
for _, e := range *localIps(lh.localAllowList) {
|
|
||||||
// Only add IPs that aren't my VPN/tun IP
|
// Only add IPs that aren't my VPN/tun IP
|
||||||
if ip2int(e) != lh.myIp {
|
if ip := e.To4(); ip != nil {
|
||||||
ipp = append(ipp, &IpAndPort{Ip: ip2int(e), Port: uint32(lh.nebulaPort)})
|
v4 = append(v4, NewIp4AndPort(e, lh.nebulaPort))
|
||||||
//fmt.Println(e)
|
} else {
|
||||||
|
v6 = append(v6, NewIp6AndPort(e, lh.nebulaPort))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m := &NebulaMeta{
|
m := &NebulaMeta{
|
||||||
Type: NebulaMeta_HostUpdateNotification,
|
Type: NebulaMeta_HostUpdateNotification,
|
||||||
Details: &NebulaMetaDetails{
|
Details: &NebulaMetaDetails{
|
||||||
VpnIp: lh.myIp,
|
VpnIp: uint32(lh.myVpnIp),
|
||||||
IpAndPorts: ipp,
|
Ip4AndPorts: v4,
|
||||||
|
Ip6AndPorts: v6,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
lh.metricTx(NebulaMeta_HostUpdateNotification, int64(len(lh.lighthouses)))
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
for vpnIp := range lh.lighthouses {
|
|
||||||
mm, err := proto.Marshal(m)
|
mm, err := proto.Marshal(m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugf("Invalid marshal to update")
|
lh.l.WithError(err).Error("Error while marshaling for lighthouse update")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
//l.Error("LIGHTHOUSE PACKET SEND", mm)
|
|
||||||
f.SendMessageToVpnIp(lightHouse, 0, vpnIp, mm, nb, out)
|
|
||||||
|
|
||||||
}
|
for vpnIp := range lh.lighthouses {
|
||||||
time.Sleep(time.Second * time.Duration(lh.interval))
|
f.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) HandleRequest(rAddr *udpAddr, vpnIp uint32, p []byte, c *cert.NebulaCertificate, f EncWriter) {
|
type LightHouseHandler struct {
|
||||||
n := &NebulaMeta{}
|
lh *LightHouse
|
||||||
err := proto.Unmarshal(p, n)
|
nb []byte
|
||||||
|
out []byte
|
||||||
|
pb []byte
|
||||||
|
meta *NebulaMeta
|
||||||
|
l *logrus.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) NewRequestHandler() *LightHouseHandler {
|
||||||
|
lhh := &LightHouseHandler{
|
||||||
|
lh: lh,
|
||||||
|
nb: make([]byte, 12, 12),
|
||||||
|
out: make([]byte, mtu),
|
||||||
|
l: lh.l,
|
||||||
|
pb: make([]byte, mtu),
|
||||||
|
|
||||||
|
meta: &NebulaMeta{
|
||||||
|
Details: &NebulaMetaDetails{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return lhh
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) metricRx(t NebulaMeta_MessageType, i int64) {
|
||||||
|
lh.metrics.Rx(header.MessageType(t), 0, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lh *LightHouse) metricTx(t NebulaMeta_MessageType, i int64) {
|
||||||
|
lh.metrics.Tx(header.MessageType(t), 0, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This method is similar to Reset(), but it re-uses the pointer structs
|
||||||
|
// so that we don't have to re-allocate them
|
||||||
|
func (lhh *LightHouseHandler) resetMeta() *NebulaMeta {
|
||||||
|
details := lhh.meta.Details
|
||||||
|
lhh.meta.Reset()
|
||||||
|
|
||||||
|
// Keep the array memory around
|
||||||
|
details.Ip4AndPorts = details.Ip4AndPorts[:0]
|
||||||
|
details.Ip6AndPorts = details.Ip6AndPorts[:0]
|
||||||
|
lhh.meta.Details = details
|
||||||
|
|
||||||
|
return lhh.meta
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w udp.EncWriter) {
|
||||||
|
n := lhh.resetMeta()
|
||||||
|
err := n.Unmarshal(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).WithField("udpAddr", rAddr).
|
lhh.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr).
|
||||||
Error("Failed to unmarshal lighthouse packet")
|
Error("Failed to unmarshal lighthouse packet")
|
||||||
//TODO: send recv_error?
|
//TODO: send recv_error?
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if n.Details == nil {
|
if n.Details == nil {
|
||||||
l.WithField("vpnIp", IntIp(vpnIp)).WithField("udpAddr", rAddr).
|
lhh.l.WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr).
|
||||||
Error("Invalid lighthouse update")
|
Error("Invalid lighthouse update")
|
||||||
//TODO: send recv_error?
|
//TODO: send recv_error?
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
lhh.lh.metricRx(n.Type, 1)
|
||||||
|
|
||||||
switch n.Type {
|
switch n.Type {
|
||||||
case NebulaMeta_HostQuery:
|
case NebulaMeta_HostQuery:
|
||||||
// Exit if we don't answer queries
|
lhh.handleHostQuery(n, vpnIp, rAddr, w)
|
||||||
if !lh.amLighthouse {
|
|
||||||
l.Debugln("I don't answer queries, but received from: ", rAddr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
//l.Debugln("Got Query")
|
|
||||||
ips, err := lh.Query(n.Details.VpnIp, f)
|
|
||||||
if err != nil {
|
|
||||||
//l.Debugf("Can't answer query %s from %s because error: %s", IntIp(n.Details.VpnIp), rAddr, err)
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
iap := NewIpAndPortsFromNetIps(ips)
|
|
||||||
answer := &NebulaMeta{
|
|
||||||
Type: NebulaMeta_HostQueryReply,
|
|
||||||
Details: &NebulaMetaDetails{
|
|
||||||
VpnIp: n.Details.VpnIp,
|
|
||||||
IpAndPorts: *iap,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
reply, err := proto.Marshal(answer)
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).Error("Failed to marshal lighthouse host query reply")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
f.SendMessageToVpnIp(lightHouse, 0, vpnIp, reply, make([]byte, 12, 12), make([]byte, mtu))
|
|
||||||
|
|
||||||
// This signals the other side to punch some zero byte udp packets
|
|
||||||
ips, err = lh.Query(vpnIp, f)
|
|
||||||
if err != nil {
|
|
||||||
l.WithField("vpnIp", IntIp(vpnIp)).Debugln("Can't notify host to punch")
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
//l.Debugln("Notify host to punch", iap)
|
|
||||||
iap = NewIpAndPortsFromNetIps(ips)
|
|
||||||
answer = &NebulaMeta{
|
|
||||||
Type: NebulaMeta_HostPunchNotification,
|
|
||||||
Details: &NebulaMetaDetails{
|
|
||||||
VpnIp: vpnIp,
|
|
||||||
IpAndPorts: *iap,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
reply, _ := proto.Marshal(answer)
|
|
||||||
f.SendMessageToVpnIp(lightHouse, 0, n.Details.VpnIp, reply, make([]byte, 12, 12), make([]byte, mtu))
|
|
||||||
}
|
|
||||||
//fmt.Println(reply, remoteaddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
case NebulaMeta_HostQueryReply:
|
case NebulaMeta_HostQueryReply:
|
||||||
if !lh.IsLighthouseIP(vpnIp) {
|
lhh.handleHostQueryReply(n, vpnIp)
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, a := range n.Details.IpAndPorts {
|
|
||||||
//first := n.Details.IpAndPorts[0]
|
|
||||||
ans := NewUDPAddr(a.Ip, uint16(a.Port))
|
|
||||||
lh.AddRemote(n.Details.VpnIp, ans, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
case NebulaMeta_HostUpdateNotification:
|
case NebulaMeta_HostUpdateNotification:
|
||||||
//Simple check that the host sent this not someone else
|
lhh.handleHostUpdateNotification(n, vpnIp)
|
||||||
if n.Details.VpnIp != vpnIp {
|
|
||||||
l.WithField("vpnIp", IntIp(vpnIp)).WithField("answer", IntIp(n.Details.VpnIp)).Debugln("Host sent invalid update")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, a := range n.Details.IpAndPorts {
|
|
||||||
ans := NewUDPAddr(a.Ip, uint16(a.Port))
|
|
||||||
lh.AddRemote(n.Details.VpnIp, ans, false)
|
|
||||||
}
|
|
||||||
case NebulaMeta_HostMovedNotification:
|
case NebulaMeta_HostMovedNotification:
|
||||||
case NebulaMeta_HostPunchNotification:
|
case NebulaMeta_HostPunchNotification:
|
||||||
if !lh.IsLighthouseIP(vpnIp) {
|
lhh.handleHostPunchNotification(n, vpnIp, w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w udp.EncWriter) {
|
||||||
|
// Exit if we don't answer queries
|
||||||
|
if !lhh.lh.amLighthouse {
|
||||||
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
|
lhh.l.Debugln("I don't answer queries, but received from: ", addr)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: we can DRY this further
|
||||||
|
reqVpnIp := n.Details.VpnIp
|
||||||
|
//TODO: Maybe instead of marshalling into n we marshal into a new `r` to not nuke our current request data
|
||||||
|
found, ln, err := lhh.lh.queryAndPrepMessage(iputil.VpnIp(n.Details.VpnIp), func(c *cache) (int, error) {
|
||||||
|
n = lhh.resetMeta()
|
||||||
|
n.Type = NebulaMeta_HostQueryReply
|
||||||
|
n.Details.VpnIp = reqVpnIp
|
||||||
|
|
||||||
|
lhh.coalesceAnswers(c, n)
|
||||||
|
|
||||||
|
return n.MarshalTo(lhh.pb)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host query reply")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lhh.lh.metricTx(NebulaMeta_HostQueryReply, 1)
|
||||||
|
w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])
|
||||||
|
|
||||||
|
// This signals the other side to punch some zero byte udp packets
|
||||||
|
found, ln, err = lhh.lh.queryAndPrepMessage(vpnIp, func(c *cache) (int, error) {
|
||||||
|
n = lhh.resetMeta()
|
||||||
|
n.Type = NebulaMeta_HostPunchNotification
|
||||||
|
n.Details.VpnIp = uint32(vpnIp)
|
||||||
|
|
||||||
|
lhh.coalesceAnswers(c, n)
|
||||||
|
|
||||||
|
return n.MarshalTo(lhh.pb)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host was queried for")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lhh.lh.metricTx(NebulaMeta_HostPunchNotification, 1)
|
||||||
|
w.SendMessageToVpnIp(header.LightHouse, 0, iputil.VpnIp(reqVpnIp), lhh.pb[:ln], lhh.nb, lhh.out[:0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) coalesceAnswers(c *cache, n *NebulaMeta) {
|
||||||
|
if c.v4 != nil {
|
||||||
|
if c.v4.learned != nil {
|
||||||
|
n.Details.Ip4AndPorts = append(n.Details.Ip4AndPorts, c.v4.learned)
|
||||||
|
}
|
||||||
|
if c.v4.reported != nil && len(c.v4.reported) > 0 {
|
||||||
|
n.Details.Ip4AndPorts = append(n.Details.Ip4AndPorts, c.v4.reported...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.v6 != nil {
|
||||||
|
if c.v6.learned != nil {
|
||||||
|
n.Details.Ip6AndPorts = append(n.Details.Ip6AndPorts, c.v6.learned)
|
||||||
|
}
|
||||||
|
if c.v6.reported != nil && len(c.v6.reported) > 0 {
|
||||||
|
n.Details.Ip6AndPorts = append(n.Details.Ip6AndPorts, c.v6.reported...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp iputil.VpnIp) {
|
||||||
|
if !lhh.lh.IsLighthouseIP(vpnIp) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lhh.lh.Lock()
|
||||||
|
am := lhh.lh.unlockedGetRemoteList(iputil.VpnIp(n.Details.VpnIp))
|
||||||
|
am.Lock()
|
||||||
|
lhh.lh.Unlock()
|
||||||
|
|
||||||
|
certVpnIp := iputil.VpnIp(n.Details.VpnIp)
|
||||||
|
am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
|
||||||
|
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
|
||||||
|
am.Unlock()
|
||||||
|
|
||||||
|
// Non-blocking attempt to trigger, skip if it would block
|
||||||
|
select {
|
||||||
|
case lhh.lh.handshakeTrigger <- iputil.VpnIp(n.Details.VpnIp):
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp) {
|
||||||
|
if !lhh.lh.amLighthouse {
|
||||||
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
|
lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
//Simple check that the host sent this not someone else
|
||||||
|
if n.Details.VpnIp != uint32(vpnIp) {
|
||||||
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
|
lhh.l.WithField("vpnIp", vpnIp).WithField("answer", iputil.VpnIp(n.Details.VpnIp)).Debugln("Host sent invalid update")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lhh.lh.Lock()
|
||||||
|
am := lhh.lh.unlockedGetRemoteList(vpnIp)
|
||||||
|
am.Lock()
|
||||||
|
lhh.lh.Unlock()
|
||||||
|
|
||||||
|
certVpnIp := iputil.VpnIp(n.Details.VpnIp)
|
||||||
|
am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
|
||||||
|
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
|
||||||
|
am.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w udp.EncWriter) {
|
||||||
|
if !lhh.lh.IsLighthouseIP(vpnIp) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
empty := []byte{0}
|
empty := []byte{0}
|
||||||
for _, a := range n.Details.IpAndPorts {
|
punch := func(vpnPeer *udp.Addr) {
|
||||||
vpnPeer := NewUDPAddr(a.Ip, uint16(a.Port))
|
if vpnPeer == nil {
|
||||||
go func() {
|
return
|
||||||
time.Sleep(lh.punchDelay)
|
|
||||||
lh.punchConn.WriteTo(empty, vpnPeer)
|
|
||||||
|
|
||||||
}()
|
|
||||||
l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
time.Sleep(lhh.lh.punchDelay)
|
||||||
|
lhh.lh.metricHolepunchTx.Inc(1)
|
||||||
|
lhh.lh.punchConn.WriteTo(empty, vpnPeer)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
|
//TODO: lacking the ip we are actually punching on, old: l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
|
||||||
|
lhh.l.Debugf("Punching on %d for %s", vpnPeer.Port, iputil.VpnIp(n.Details.VpnIp))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range n.Details.Ip4AndPorts {
|
||||||
|
punch(NewUDPAddrFromLH4(a))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range n.Details.Ip6AndPorts {
|
||||||
|
punch(NewUDPAddrFromLH6(a))
|
||||||
|
}
|
||||||
|
|
||||||
// This sends a nebula test packet to the host trying to contact us. In the case
|
// This sends a nebula test packet to the host trying to contact us. In the case
|
||||||
// of a double nat or other difficult scenario, this may help establish
|
// of a double nat or other difficult scenario, this may help establish
|
||||||
// a tunnel.
|
// a tunnel.
|
||||||
if lh.punchBack {
|
if lhh.lh.punchBack {
|
||||||
|
queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
|
||||||
go func() {
|
go func() {
|
||||||
time.Sleep(time.Second * 5)
|
time.Sleep(time.Second * 5)
|
||||||
l.Debugf("Sending a nebula test packet to vpn ip %s", IntIp(n.Details.VpnIp))
|
if lhh.l.Level >= logrus.DebugLevel {
|
||||||
f.SendMessageToVpnIp(test, testRequest, n.Details.VpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
lhh.l.Debugf("Sending a nebula test packet to vpn ip %s", queryVpnIp)
|
||||||
|
}
|
||||||
|
//NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine
|
||||||
|
// for each punchBack packet. We should move this into a timerwheel or a single goroutine
|
||||||
|
// managed by a channel.
|
||||||
|
w.SendMessageToVpnIp(header.Test, header.TestRequest, queryVpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
// ipMaskContains checks if testIp is contained by ip after applying a cidr
|
||||||
func (f *Interface) sendPathCheck(ci *ConnectionState, endpoint *net.UDPAddr, counter int) {
|
// zeros is 32 - bits from net.IPMask.Size()
|
||||||
c := ci.messageCounter
|
func ipMaskContains(ip iputil.VpnIp, zeros iputil.VpnIp, testIp iputil.VpnIp) bool {
|
||||||
b := HeaderEncode(nil, Version, uint8(path_check), 0, ci.remoteIndex, c)
|
return (testIp^ip)>>zeros == 0
|
||||||
ci.messageCounter++
|
|
||||||
|
|
||||||
if ci.eKey != nil {
|
|
||||||
msg := ci.eKey.EncryptDanger(b, nil, []byte(strconv.Itoa(counter)), c)
|
|
||||||
//msg := ci.eKey.EncryptDanger(b, nil, []byte(fmt.Sprintf("%d", counter)), c)
|
|
||||||
f.outside.WriteTo(msg, endpoint)
|
|
||||||
l.Debugf("path_check sent, remote index: %d, pathCounter %d", ci.remoteIndex, counter)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendPathCheckReply(ci *ConnectionState, endpoint *net.UDPAddr, counter []byte) {
|
|
||||||
c := ci.messageCounter
|
|
||||||
b := HeaderEncode(nil, Version, uint8(path_check_reply), 0, ci.remoteIndex, c)
|
|
||||||
ci.messageCounter++
|
|
||||||
|
|
||||||
if ci.eKey != nil {
|
|
||||||
msg := ci.eKey.EncryptDanger(b, nil, counter, c)
|
|
||||||
f.outside.WriteTo(msg, endpoint)
|
|
||||||
l.Debugln("path_check sent, remote index: ", ci.remoteIndex)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|||||||
@@ -1,16 +1,32 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//TODO: Add a test to ensure udpAddr is copied and not reused
|
||||||
|
|
||||||
|
func TestOldIPv4Only(t *testing.T) {
|
||||||
|
// This test ensures our new ipv6 enabled LH protobuf IpAndPorts works with the old style to enable backwards compatibility
|
||||||
|
b := []byte{8, 129, 130, 132, 80, 16, 10}
|
||||||
|
var m Ip4AndPort
|
||||||
|
err := proto.Unmarshal(b, &m)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "10.1.1.1", iputil.VpnIp(m.GetIp()).String())
|
||||||
|
}
|
||||||
|
|
||||||
func TestNewLhQuery(t *testing.T) {
|
func TestNewLhQuery(t *testing.T) {
|
||||||
myIp := net.ParseIP("192.1.1.1")
|
myIp := net.ParseIP("192.1.1.1")
|
||||||
myIpint := ip2int(myIp)
|
myIpint := iputil.Ip2VpnIp(myIp)
|
||||||
|
|
||||||
// Generating a new lh query should work
|
// Generating a new lh query should work
|
||||||
a := NewLhQueryByInt(myIpint)
|
a := NewLhQueryByInt(myIpint)
|
||||||
@@ -29,70 +45,346 @@ func TestNewLhQuery(t *testing.T) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewipandportfromudpaddr(t *testing.T) {
|
|
||||||
blah := NewUDPAddrFromString("1.2.2.3:12345")
|
|
||||||
meh := NewIpAndPortFromUDPAddr(*blah)
|
|
||||||
assert.Equal(t, uint32(16908803), meh.Ip)
|
|
||||||
assert.Equal(t, uint32(12345), meh.Port)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewipandportsfromudpaddrs(t *testing.T) {
|
|
||||||
blah := NewUDPAddrFromString("1.2.2.3:12345")
|
|
||||||
blah2 := NewUDPAddrFromString("9.9.9.9:47828")
|
|
||||||
group := []udpAddr{*blah, *blah2}
|
|
||||||
hah := NewIpAndPortsFromNetIps(group)
|
|
||||||
assert.IsType(t, &[]*IpAndPort{}, hah)
|
|
||||||
//t.Error(reflect.TypeOf(hah))
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_lhStaticMapping(t *testing.T) {
|
func Test_lhStaticMapping(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
lh1 := "10.128.0.2"
|
lh1 := "10.128.0.2"
|
||||||
lh1IP := net.ParseIP(lh1)
|
lh1IP := net.ParseIP(lh1)
|
||||||
|
|
||||||
udpServer, _ := NewListener("0.0.0.0", 0, true)
|
udpServer, _ := udp.NewListener(l, "0.0.0.0", 0, true, 2)
|
||||||
|
|
||||||
meh := NewLightHouse(true, 1, []uint32{ip2int(lh1IP)}, 10, 10003, udpServer, false, 1)
|
meh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 255}}, []iputil.VpnIp{iputil.Ip2VpnIp(lh1IP)}, 10, 10003, udpServer, false, 1, false)
|
||||||
meh.AddRemote(ip2int(lh1IP), NewUDPAddr(ip2int(lh1IP), uint16(4242)), true)
|
meh.AddStaticRemote(iputil.Ip2VpnIp(lh1IP), udp.NewAddr(lh1IP, uint16(4242)))
|
||||||
err := meh.ValidateLHStaticEntries()
|
err := meh.ValidateLHStaticEntries()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
lh2 := "10.128.0.3"
|
lh2 := "10.128.0.3"
|
||||||
lh2IP := net.ParseIP(lh2)
|
lh2IP := net.ParseIP(lh2)
|
||||||
|
|
||||||
meh = NewLightHouse(true, 1, []uint32{ip2int(lh1IP), ip2int(lh2IP)}, 10, 10003, udpServer, false, 1)
|
meh = NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 255}}, []iputil.VpnIp{iputil.Ip2VpnIp(lh1IP), iputil.Ip2VpnIp(lh2IP)}, 10, 10003, udpServer, false, 1, false)
|
||||||
meh.AddRemote(ip2int(lh1IP), NewUDPAddr(ip2int(lh1IP), uint16(4242)), true)
|
meh.AddStaticRemote(iputil.Ip2VpnIp(lh1IP), udp.NewAddr(lh1IP, uint16(4242)))
|
||||||
err = meh.ValidateLHStaticEntries()
|
err = meh.ValidateLHStaticEntries()
|
||||||
assert.EqualError(t, err, "Lighthouse 10.128.0.3 does not have a static_host_map entry")
|
assert.EqualError(t, err, "Lighthouse 10.128.0.3 does not have a static_host_map entry")
|
||||||
}
|
}
|
||||||
|
|
||||||
//func NewLightHouse(amLighthouse bool, myIp uint32, ips []string, interval int, nebulaPort int, pc *udpConn, punchBack bool) *LightHouse {
|
func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
|
lh1 := "10.128.0.2"
|
||||||
|
lh1IP := net.ParseIP(lh1)
|
||||||
|
|
||||||
/*
|
udpServer, _ := udp.NewListener(l, "0.0.0.0", 0, true, 2)
|
||||||
func TestLHQuery(t *testing.T) {
|
|
||||||
//n := NewLhQueryByIpString("10.128.0.3")
|
|
||||||
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
|
|
||||||
m := NewHostMap(myNet)
|
|
||||||
y, _ := net.ResolveUDPAddr("udp", "10.128.0.3:11111")
|
|
||||||
m.Add(ip2int(net.ParseIP("127.0.0.1")), y)
|
|
||||||
//t.Errorf("%s", m)
|
|
||||||
_ = m
|
|
||||||
|
|
||||||
_, n, _ := net.ParseCIDR("127.0.0.1/8")
|
lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{iputil.Ip2VpnIp(lh1IP)}, 10, 10003, udpServer, false, 1, false)
|
||||||
|
|
||||||
/*udpServer, err := net.ListenUDP("udp", &net.UDPAddr{Port: 10009})
|
hAddr := udp.NewAddrFromString("4.5.6.7:12345")
|
||||||
if err != nil {
|
hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
|
||||||
t.Errorf("%s", err)
|
lh.addrMap[3] = NewRemoteList()
|
||||||
|
lh.addrMap[3].unlockedSetV4(
|
||||||
|
3,
|
||||||
|
3,
|
||||||
|
[]*Ip4AndPort{
|
||||||
|
NewIp4AndPort(hAddr.IP, uint32(hAddr.Port)),
|
||||||
|
NewIp4AndPort(hAddr2.IP, uint32(hAddr2.Port)),
|
||||||
|
},
|
||||||
|
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
|
||||||
|
)
|
||||||
|
|
||||||
|
rAddr := udp.NewAddrFromString("1.2.2.3:12345")
|
||||||
|
rAddr2 := udp.NewAddrFromString("1.2.2.3:12346")
|
||||||
|
lh.addrMap[2] = NewRemoteList()
|
||||||
|
lh.addrMap[2].unlockedSetV4(
|
||||||
|
3,
|
||||||
|
3,
|
||||||
|
[]*Ip4AndPort{
|
||||||
|
NewIp4AndPort(rAddr.IP, uint32(rAddr.Port)),
|
||||||
|
NewIp4AndPort(rAddr2.IP, uint32(rAddr2.Port)),
|
||||||
|
},
|
||||||
|
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
|
||||||
|
)
|
||||||
|
|
||||||
|
mw := &mockEncWriter{}
|
||||||
|
|
||||||
|
b.Run("notfound", func(b *testing.B) {
|
||||||
|
lhh := lh.NewRequestHandler()
|
||||||
|
req := &NebulaMeta{
|
||||||
|
Type: NebulaMeta_HostQuery,
|
||||||
|
Details: &NebulaMetaDetails{
|
||||||
|
VpnIp: 4,
|
||||||
|
Ip4AndPorts: nil,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
p, err := proto.Marshal(req)
|
||||||
meh := NewLightHouse(n, m, []string{"10.128.0.2"}, false, 10, 10003, 10004)
|
assert.NoError(b, err)
|
||||||
//t.Error(m.Hosts)
|
for n := 0; n < b.N; n++ {
|
||||||
meh2, err := meh.Query(ip2int(net.ParseIP("10.128.0.3")))
|
lhh.HandleRequest(rAddr, 2, p, mw)
|
||||||
t.Error(err)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
t.Errorf("%s", meh2)
|
})
|
||||||
t.Errorf("%s", n)
|
b.Run("found", func(b *testing.B) {
|
||||||
|
lhh := lh.NewRequestHandler()
|
||||||
|
req := &NebulaMeta{
|
||||||
|
Type: NebulaMeta_HostQuery,
|
||||||
|
Details: &NebulaMetaDetails{
|
||||||
|
VpnIp: 3,
|
||||||
|
Ip4AndPorts: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
p, err := proto.Marshal(req)
|
||||||
|
assert.NoError(b, err)
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
lhh.HandleRequest(rAddr, 2, p, mw)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLighthouse_Memory(t *testing.T) {
|
||||||
|
l := util.NewTestLogger()
|
||||||
|
|
||||||
|
myUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.2"), Port: 4242}
|
||||||
|
myUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4242}
|
||||||
|
myUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.2"), Port: 4242}
|
||||||
|
myUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.2"), Port: 4242}
|
||||||
|
myUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.2"), Port: 4242}
|
||||||
|
myUdpAddr5 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4243}
|
||||||
|
myUdpAddr6 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4244}
|
||||||
|
myUdpAddr7 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4245}
|
||||||
|
myUdpAddr8 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4246}
|
||||||
|
myUdpAddr9 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4247}
|
||||||
|
myUdpAddr10 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4248}
|
||||||
|
myUdpAddr11 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4249}
|
||||||
|
myVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.2"))
|
||||||
|
|
||||||
|
theirUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.3"), Port: 4242}
|
||||||
|
theirUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.3"), Port: 4242}
|
||||||
|
theirUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.3"), Port: 4242}
|
||||||
|
theirUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.3"), Port: 4242}
|
||||||
|
theirUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.3"), Port: 4242}
|
||||||
|
theirVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.3"))
|
||||||
|
|
||||||
|
udpServer, _ := udp.NewListener(l, "0.0.0.0", 0, true, 2)
|
||||||
|
lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, []iputil.VpnIp{}, 10, 10003, udpServer, false, 1, false)
|
||||||
|
lhh := lh.NewRequestHandler()
|
||||||
|
|
||||||
|
// Test that my first update responds with just that
|
||||||
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr2}, lhh)
|
||||||
|
r := newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2)
|
||||||
|
|
||||||
|
// Ensure we don't accumulate addresses
|
||||||
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr3}, lhh)
|
||||||
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr3)
|
||||||
|
|
||||||
|
// Grow it back to 2
|
||||||
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr4}, lhh)
|
||||||
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
|
||||||
|
|
||||||
|
// Update a different host
|
||||||
|
newLHHostUpdate(theirUdpAddr0, theirVpnIp, []*udp.Addr{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh)
|
||||||
|
r = newLHHostRequest(theirUdpAddr0, theirVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
|
||||||
|
|
||||||
|
// Make sure we didn't get changed
|
||||||
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
|
||||||
|
|
||||||
|
// Ensure proper ordering and limiting
|
||||||
|
// Send 12 addrs, get 10 back, the last 2 removed, allowing the duplicate to remain (clients dedupe)
|
||||||
|
newLHHostUpdate(
|
||||||
|
myUdpAddr0,
|
||||||
|
myVpnIp,
|
||||||
|
[]*udp.Addr{
|
||||||
|
myUdpAddr1,
|
||||||
|
myUdpAddr2,
|
||||||
|
myUdpAddr3,
|
||||||
|
myUdpAddr4,
|
||||||
|
myUdpAddr5,
|
||||||
|
myUdpAddr5, //Duplicated on purpose
|
||||||
|
myUdpAddr6,
|
||||||
|
myUdpAddr7,
|
||||||
|
myUdpAddr8,
|
||||||
|
myUdpAddr9,
|
||||||
|
myUdpAddr10,
|
||||||
|
myUdpAddr11, // This should get cut
|
||||||
|
}, lhh)
|
||||||
|
|
||||||
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(
|
||||||
|
t,
|
||||||
|
r.msg.Details.Ip4AndPorts,
|
||||||
|
myUdpAddr1, myUdpAddr2, myUdpAddr3, myUdpAddr4, myUdpAddr5, myUdpAddr5, myUdpAddr6, myUdpAddr7, myUdpAddr8, myUdpAddr9,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Make sure we won't add ips in our vpn network
|
||||||
|
bad1 := &udp.Addr{IP: net.ParseIP("10.128.0.99"), Port: 4242}
|
||||||
|
bad2 := &udp.Addr{IP: net.ParseIP("10.128.0.100"), Port: 4242}
|
||||||
|
good := &udp.Addr{IP: net.ParseIP("1.128.0.99"), Port: 4242}
|
||||||
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{bad1, bad2, good}, lhh)
|
||||||
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
|
||||||
|
req := &NebulaMeta{
|
||||||
|
Type: NebulaMeta_HostQuery,
|
||||||
|
Details: &NebulaMetaDetails{
|
||||||
|
VpnIp: uint32(queryVpnIp),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := req.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &testEncWriter{}
|
||||||
|
lhh.HandleRequest(fromAddr, myVpnIp, b, w)
|
||||||
|
return w.lastReply
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLHHostUpdate(fromAddr *udp.Addr, vpnIp iputil.VpnIp, addrs []*udp.Addr, lhh *LightHouseHandler) {
|
||||||
|
req := &NebulaMeta{
|
||||||
|
Type: NebulaMeta_HostUpdateNotification,
|
||||||
|
Details: &NebulaMetaDetails{
|
||||||
|
VpnIp: uint32(vpnIp),
|
||||||
|
Ip4AndPorts: make([]*Ip4AndPort, len(addrs)),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range addrs {
|
||||||
|
req.Details.Ip4AndPorts[k] = &Ip4AndPort{Ip: uint32(iputil.Ip2VpnIp(v.IP)), Port: uint32(v.Port)}
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := req.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &testEncWriter{}
|
||||||
|
lhh.HandleRequest(fromAddr, vpnIp, b, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: this is a RemoteList test
|
||||||
|
//func Test_lhRemoteAllowList(t *testing.T) {
|
||||||
|
// l := NewTestLogger()
|
||||||
|
// c := NewConfig(l)
|
||||||
|
// c.Settings["remoteallowlist"] = map[interface{}]interface{}{
|
||||||
|
// "10.20.0.0/12": false,
|
||||||
|
// }
|
||||||
|
// allowList, err := c.GetAllowList("remoteallowlist", false)
|
||||||
|
// assert.Nil(t, err)
|
||||||
|
//
|
||||||
|
// lh1 := "10.128.0.2"
|
||||||
|
// lh1IP := net.ParseIP(lh1)
|
||||||
|
//
|
||||||
|
// udpServer, _ := NewListener(l, "0.0.0.0", 0, true)
|
||||||
|
//
|
||||||
|
// lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, []uint32{ip2int(lh1IP)}, 10, 10003, udpServer, false, 1, false)
|
||||||
|
// lh.SetRemoteAllowList(allowList)
|
||||||
|
//
|
||||||
|
// // A disallowed ip should not enter the cache but we should end up with an empty entry in the addrMap
|
||||||
|
// remote1IP := net.ParseIP("10.20.0.3")
|
||||||
|
// remotes := lh.unlockedGetRemoteList(ip2int(remote1IP))
|
||||||
|
// remotes.unlockedPrependV4(ip2int(remote1IP), NewIp4AndPort(remote1IP, 4242))
|
||||||
|
// assert.NotNil(t, lh.addrMap[ip2int(remote1IP)])
|
||||||
|
// assert.Empty(t, lh.addrMap[ip2int(remote1IP)].CopyAddrs([]*net.IPNet{}))
|
||||||
|
//
|
||||||
|
// // Make sure a good ip enters the cache and addrMap
|
||||||
|
// remote2IP := net.ParseIP("10.128.0.3")
|
||||||
|
// remote2UDPAddr := NewUDPAddr(remote2IP, uint16(4242))
|
||||||
|
// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote2UDPAddr.IP, uint32(remote2UDPAddr.Port)), false, false)
|
||||||
|
// assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr)
|
||||||
|
//
|
||||||
|
// // Another good ip gets into the cache, ordering is inverted
|
||||||
|
// remote3IP := net.ParseIP("10.128.0.4")
|
||||||
|
// remote3UDPAddr := NewUDPAddr(remote3IP, uint16(4243))
|
||||||
|
// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote3UDPAddr.IP, uint32(remote3UDPAddr.Port)), false, false)
|
||||||
|
// assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr, remote3UDPAddr)
|
||||||
|
//
|
||||||
|
// // If we exceed the length limit we should only have the most recent addresses
|
||||||
|
// addedAddrs := []*udpAddr{}
|
||||||
|
// for i := 0; i < 11; i++ {
|
||||||
|
// remoteUDPAddr := NewUDPAddr(net.IP{10, 128, 0, 4}, uint16(4243+i))
|
||||||
|
// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remoteUDPAddr.IP, uint32(remoteUDPAddr.Port)), false, false)
|
||||||
|
// // The first entry here is a duplicate, don't add it to the assert list
|
||||||
|
// if i != 0 {
|
||||||
|
// addedAddrs = append(addedAddrs, remoteUDPAddr)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // We should only have the last 10 of what we tried to add
|
||||||
|
// assert.True(t, len(addedAddrs) >= 10, "We should have tried to add at least 10 addresses")
|
||||||
|
// assertUdpAddrInArray(
|
||||||
|
// t,
|
||||||
|
// lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}),
|
||||||
|
// addedAddrs[0],
|
||||||
|
// addedAddrs[1],
|
||||||
|
// addedAddrs[2],
|
||||||
|
// addedAddrs[3],
|
||||||
|
// addedAddrs[4],
|
||||||
|
// addedAddrs[5],
|
||||||
|
// addedAddrs[6],
|
||||||
|
// addedAddrs[7],
|
||||||
|
// addedAddrs[8],
|
||||||
|
// addedAddrs[9],
|
||||||
|
// )
|
||||||
|
//}
|
||||||
|
|
||||||
|
func Test_ipMaskContains(t *testing.T) {
|
||||||
|
assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.0.255"))))
|
||||||
|
assert.False(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1"))))
|
||||||
|
assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1"))))
|
||||||
|
}
|
||||||
|
|
||||||
|
type testLhReply struct {
|
||||||
|
nebType header.MessageType
|
||||||
|
nebSubType header.MessageSubType
|
||||||
|
vpnIp iputil.VpnIp
|
||||||
|
msg *NebulaMeta
|
||||||
|
}
|
||||||
|
|
||||||
|
type testEncWriter struct {
|
||||||
|
lastReply testLhReply
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
|
||||||
|
tw.lastReply = testLhReply{
|
||||||
|
nebType: t,
|
||||||
|
nebSubType: st,
|
||||||
|
vpnIp: vpnIp,
|
||||||
|
msg: &NebulaMeta{},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := proto.Unmarshal(p, tw.lastReply.msg)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertIp4InArray asserts every address in want is at the same position in have and that the lengths match
|
||||||
|
func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...*udp.Addr) {
|
||||||
|
assert.Len(t, have, len(want))
|
||||||
|
for k, w := range want {
|
||||||
|
if !(have[k].Ip == uint32(iputil.Ip2VpnIp(w.IP)) && have[k].Port == uint32(w.Port)) {
|
||||||
|
assert.Fail(t, fmt.Sprintf("Response did not contain: %v:%v at %v; %v", w.IP, w.Port, k, translateV4toUdpAddr(have)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertUdpAddrInArray asserts every address in want is at the same position in have and that the lengths match
|
||||||
|
func assertUdpAddrInArray(t *testing.T, have []*udp.Addr, want ...*udp.Addr) {
|
||||||
|
assert.Len(t, have, len(want))
|
||||||
|
for k, w := range want {
|
||||||
|
if !(have[k].IP.Equal(w.IP) && have[k].Port == w.Port) {
|
||||||
|
assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v; %v", w, k, have))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func translateV4toUdpAddr(ips []*Ip4AndPort) []*udp.Addr {
|
||||||
|
addrs := make([]*udp.Addr, len(ips))
|
||||||
|
for k, v := range ips {
|
||||||
|
addrs[k] = NewUDPAddrFromLH4(v)
|
||||||
|
}
|
||||||
|
return addrs
|
||||||
}
|
}
|
||||||
*/
|
|
||||||
|
|||||||
78
logger.go
Normal file
78
logger.go
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ContextualError struct {
|
||||||
|
RealError error
|
||||||
|
Fields map[string]interface{}
|
||||||
|
Context string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewContextualError(msg string, fields map[string]interface{}, realError error) ContextualError {
|
||||||
|
return ContextualError{Context: msg, Fields: fields, RealError: realError}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ce ContextualError) Error() string {
|
||||||
|
if ce.RealError == nil {
|
||||||
|
return ce.Context
|
||||||
|
}
|
||||||
|
return ce.RealError.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ce ContextualError) Unwrap() error {
|
||||||
|
if ce.RealError == nil {
|
||||||
|
return errors.New(ce.Context)
|
||||||
|
}
|
||||||
|
return ce.RealError
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ce *ContextualError) Log(lr *logrus.Logger) {
|
||||||
|
if ce.RealError != nil {
|
||||||
|
lr.WithFields(ce.Fields).WithError(ce.RealError).Error(ce.Context)
|
||||||
|
} else {
|
||||||
|
lr.WithFields(ce.Fields).Error(ce.Context)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func configLogger(l *logrus.Logger, c *config.C) error {
|
||||||
|
// set up our logging level
|
||||||
|
logLevel, err := logrus.ParseLevel(strings.ToLower(c.GetString("logging.level", "info")))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%s; possible levels: %s", err, logrus.AllLevels)
|
||||||
|
}
|
||||||
|
l.SetLevel(logLevel)
|
||||||
|
|
||||||
|
disableTimestamp := c.GetBool("logging.disable_timestamp", false)
|
||||||
|
timestampFormat := c.GetString("logging.timestamp_format", "")
|
||||||
|
fullTimestamp := (timestampFormat != "")
|
||||||
|
if timestampFormat == "" {
|
||||||
|
timestampFormat = time.RFC3339
|
||||||
|
}
|
||||||
|
|
||||||
|
logFormat := strings.ToLower(c.GetString("logging.format", "text"))
|
||||||
|
switch logFormat {
|
||||||
|
case "text":
|
||||||
|
l.Formatter = &logrus.TextFormatter{
|
||||||
|
TimestampFormat: timestampFormat,
|
||||||
|
FullTimestamp: fullTimestamp,
|
||||||
|
DisableTimestamp: disableTimestamp,
|
||||||
|
}
|
||||||
|
case "json":
|
||||||
|
l.Formatter = &logrus.JSONFormatter{
|
||||||
|
TimestampFormat: timestampFormat,
|
||||||
|
DisableTimestamp: disableTimestamp,
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown log format `%s`. possible formats: %s", logFormat, []string{"text", "json"})
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
67
logger_test.go
Normal file
67
logger_test.go
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TestLogWriter struct {
|
||||||
|
Logs []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTestLogWriter() *TestLogWriter {
|
||||||
|
return &TestLogWriter{Logs: make([]string, 0)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tl *TestLogWriter) Write(p []byte) (n int, err error) {
|
||||||
|
tl.Logs = append(tl.Logs, string(p))
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tl *TestLogWriter) Reset() {
|
||||||
|
tl.Logs = tl.Logs[:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestContextualError_Log(t *testing.T) {
|
||||||
|
l := logrus.New()
|
||||||
|
l.Formatter = &logrus.TextFormatter{
|
||||||
|
DisableTimestamp: true,
|
||||||
|
DisableColors: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
tl := NewTestLogWriter()
|
||||||
|
l.Out = tl
|
||||||
|
|
||||||
|
// Test a full context line
|
||||||
|
tl.Reset()
|
||||||
|
e := NewContextualError("test message", m{"field": "1"}, errors.New("error"))
|
||||||
|
e.Log(l)
|
||||||
|
assert.Equal(t, []string{"level=error msg=\"test message\" error=error field=1\n"}, tl.Logs)
|
||||||
|
|
||||||
|
// Test a line with an error and msg but no fields
|
||||||
|
tl.Reset()
|
||||||
|
e = NewContextualError("test message", nil, errors.New("error"))
|
||||||
|
e.Log(l)
|
||||||
|
assert.Equal(t, []string{"level=error msg=\"test message\" error=error\n"}, tl.Logs)
|
||||||
|
|
||||||
|
// Test just a context and fields
|
||||||
|
tl.Reset()
|
||||||
|
e = NewContextualError("test message", m{"field": "1"}, nil)
|
||||||
|
e.Log(l)
|
||||||
|
assert.Equal(t, []string{"level=error msg=\"test message\" field=1\n"}, tl.Logs)
|
||||||
|
|
||||||
|
// Test just a context
|
||||||
|
tl.Reset()
|
||||||
|
e = NewContextualError("test message", nil, nil)
|
||||||
|
e.Log(l)
|
||||||
|
assert.Equal(t, []string{"level=error msg=\"test message\"\n"}, tl.Logs)
|
||||||
|
|
||||||
|
// Test just an error
|
||||||
|
tl.Reset()
|
||||||
|
e = NewContextualError("", nil, errors.New("error"))
|
||||||
|
e.Log(l)
|
||||||
|
assert.Equal(t, []string{"level=error error=error\n"}, tl.Logs)
|
||||||
|
}
|
||||||
387
main.go
387
main.go
@@ -1,145 +1,220 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/sshd"
|
"github.com/slackhq/nebula/sshd"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
var l = logrus.New()
|
|
||||||
|
|
||||||
type m map[string]interface{}
|
type m map[string]interface{}
|
||||||
|
|
||||||
func Main(configPath string, configTest bool, buildVersion string) {
|
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, tunFd *int) (retcon *Control, reterr error) {
|
||||||
l.Out = os.Stdout
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
// Automatically cancel the context if Main returns an error, to signal all created goroutines to quit.
|
||||||
|
defer func() {
|
||||||
|
if reterr != nil {
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
l := logger
|
||||||
l.Formatter = &logrus.TextFormatter{
|
l.Formatter = &logrus.TextFormatter{
|
||||||
FullTimestamp: true,
|
FullTimestamp: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
config := NewConfig()
|
|
||||||
err := config.Load(configPath)
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).Error("Failed to load config")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print the config if in test, the exit comes later
|
// Print the config if in test, the exit comes later
|
||||||
if configTest {
|
if configTest {
|
||||||
b, err := yaml.Marshal(config.Settings)
|
b, err := yaml.Marshal(c.Settings)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Println(err)
|
return nil, err
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Print the final config
|
||||||
l.Println(string(b))
|
l.Println(string(b))
|
||||||
}
|
}
|
||||||
|
|
||||||
err = configLogger(config)
|
err := configLogger(l, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Failed to configure the logger")
|
return nil, NewContextualError("Failed to configure the logger", nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
config.RegisterReloadCallback(func(c *Config) {
|
c.RegisterReloadCallback(func(c *config.C) {
|
||||||
err := configLogger(c)
|
err := configLogger(l, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Failed to configure the logger")
|
l.WithError(err).Error("Failed to configure the logger")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// trustedCAs is currently a global, so loadCA operates on that global directly
|
caPool, err := loadCAFromConfig(l, c)
|
||||||
trustedCAs, err = loadCAFromConfig(config)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//The errors coming out of loadCA are already nicely formatted
|
//The errors coming out of loadCA are already nicely formatted
|
||||||
l.WithError(err).Fatal("Failed to load ca from config")
|
return nil, NewContextualError("Failed to load ca from config", nil, err)
|
||||||
}
|
}
|
||||||
l.WithField("fingerprints", trustedCAs.GetFingerprints()).Debug("Trusted CA fingerprints")
|
l.WithField("fingerprints", caPool.GetFingerprints()).Debug("Trusted CA fingerprints")
|
||||||
|
|
||||||
cs, err := NewCertStateFromConfig(config)
|
cs, err := NewCertStateFromConfig(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//The errors coming out of NewCertStateFromConfig are already nicely formatted
|
//The errors coming out of NewCertStateFromConfig are already nicely formatted
|
||||||
l.WithError(err).Fatal("Failed to load certificate from config")
|
return nil, NewContextualError("Failed to load certificate from config", nil, err)
|
||||||
}
|
}
|
||||||
l.WithField("cert", cs.certificate).Debug("Client nebula certificate")
|
l.WithField("cert", cs.certificate).Debug("Client nebula certificate")
|
||||||
|
|
||||||
fw, err := NewFirewallFromConfig(cs.certificate, config)
|
fw, err := NewFirewallFromConfig(l, cs.certificate, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Error while loading firewall rules")
|
return nil, NewContextualError("Error while loading firewall rules", nil, err)
|
||||||
}
|
}
|
||||||
l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started")
|
l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started")
|
||||||
|
|
||||||
// TODO: make sure mask is 4 bytes
|
// TODO: make sure mask is 4 bytes
|
||||||
tunCidr := cs.certificate.Details.Ips[0]
|
tunCidr := cs.certificate.Details.Ips[0]
|
||||||
routes, err := parseRoutes(config, tunCidr)
|
routes, err := parseRoutes(c, tunCidr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Could not parse tun.routes")
|
return nil, NewContextualError("Could not parse tun.routes", nil, err)
|
||||||
}
|
}
|
||||||
unsafeRoutes, err := parseUnsafeRoutes(config, tunCidr)
|
unsafeRoutes, err := parseUnsafeRoutes(c, tunCidr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Could not parse tun.unsafe_routes")
|
return nil, NewContextualError("Could not parse tun.unsafe_routes", nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
|
ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
|
||||||
wireSSHReload(ssh, config)
|
wireSSHReload(l, ssh, c)
|
||||||
if config.GetBool("sshd.enabled", false) {
|
var sshStart func()
|
||||||
err = configSSH(ssh, config)
|
if c.GetBool("sshd.enabled", false) {
|
||||||
|
sshStart, err = configSSH(l, ssh, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Error while configuring the sshd")
|
return nil, NewContextualError("Error while configuring the sshd", nil, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
psk, err := NewPskFromConfig(c, iputil.Ip2VpnIp(tunCidr.IP))
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewContextualError("Failed to create psk", nil, err)
|
||||||
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// All non system modifying configuration consumption should live above this line
|
// All non system modifying configuration consumption should live above this line
|
||||||
// tun config, listeners, anything modifying the computer should be below
|
// tun config, listeners, anything modifying the computer should be below
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
var tun *Tun
|
var routines int
|
||||||
if !configTest {
|
|
||||||
config.CatchHUP()
|
|
||||||
|
|
||||||
// set up our tun dev
|
// If `routines` is set, use that and ignore the specific values
|
||||||
tun, err = newTun(
|
if routines = c.GetInt("routines", 0); routines != 0 {
|
||||||
config.GetString("tun.dev", ""),
|
if routines < 1 {
|
||||||
|
routines = 1
|
||||||
|
}
|
||||||
|
if routines > 1 {
|
||||||
|
l.WithField("routines", routines).Info("Using multiple routines")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// deprecated and undocumented
|
||||||
|
tunQueues := c.GetInt("tun.routines", 1)
|
||||||
|
udpQueues := c.GetInt("listen.routines", 1)
|
||||||
|
if tunQueues > udpQueues {
|
||||||
|
routines = tunQueues
|
||||||
|
} else {
|
||||||
|
routines = udpQueues
|
||||||
|
}
|
||||||
|
if routines != 1 {
|
||||||
|
l.WithField("routines", routines).Warn("Setting tun.routines and listen.routines is deprecated. Use `routines` instead")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPERIMENTAL
|
||||||
|
// Intentionally not documented yet while we do more testing and determine
|
||||||
|
// a good default value.
|
||||||
|
conntrackCacheTimeout := c.GetDuration("firewall.conntrack.routine_cache_timeout", 0)
|
||||||
|
if routines > 1 && !c.IsSet("firewall.conntrack.routine_cache_timeout") {
|
||||||
|
// Use a different default if we are running with multiple routines
|
||||||
|
conntrackCacheTimeout = 1 * time.Second
|
||||||
|
}
|
||||||
|
if conntrackCacheTimeout > 0 {
|
||||||
|
l.WithField("duration", conntrackCacheTimeout).Info("Using routine-local conntrack cache")
|
||||||
|
}
|
||||||
|
|
||||||
|
var tun Inside
|
||||||
|
if !configTest {
|
||||||
|
c.CatchHUP(ctx)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case c.GetBool("tun.disabled", false):
|
||||||
|
tun = newDisabledTun(tunCidr, c.GetInt("tun.tx_queue", 500), c.GetBool("stats.message_metrics", false), l)
|
||||||
|
case tunFd != nil:
|
||||||
|
tun, err = newTunFromFd(
|
||||||
|
l,
|
||||||
|
*tunFd,
|
||||||
tunCidr,
|
tunCidr,
|
||||||
config.GetInt("tun.mtu", DEFAULT_MTU),
|
c.GetInt("tun.mtu", DEFAULT_MTU),
|
||||||
routes,
|
routes,
|
||||||
unsafeRoutes,
|
unsafeRoutes,
|
||||||
config.GetInt("tun.tx_queue", 500),
|
c.GetInt("tun.tx_queue", 500),
|
||||||
)
|
)
|
||||||
|
default:
|
||||||
|
tun, err = newTun(
|
||||||
|
l,
|
||||||
|
c.GetString("tun.dev", ""),
|
||||||
|
tunCidr,
|
||||||
|
c.GetInt("tun.mtu", DEFAULT_MTU),
|
||||||
|
routes,
|
||||||
|
unsafeRoutes,
|
||||||
|
c.GetInt("tun.tx_queue", 500),
|
||||||
|
routines > 1,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Failed to get a tun/tap device")
|
return nil, NewContextualError("Failed to get a tun/tap device", nil, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if reterr != nil {
|
||||||
|
tun.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// set up our UDP listener
|
// set up our UDP listener
|
||||||
udpQueues := config.GetInt("listen.routines", 1)
|
udpConns := make([]*udp.Conn, routines)
|
||||||
var udpServer *udpConn
|
port := c.GetInt("listen.port", 0)
|
||||||
|
|
||||||
if !configTest {
|
if !configTest {
|
||||||
udpServer, err = NewListener(config.GetString("listen.host", "0.0.0.0"), config.GetInt("listen.port", 0), udpQueues > 1)
|
for i := 0; i < routines; i++ {
|
||||||
|
udpServer, err := udp.NewListener(l, c.GetString("listen.host", "0.0.0.0"), port, routines > 1, c.GetInt("listen.batch", 64))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Failed to open udp listener")
|
return nil, NewContextualError("Failed to open udp listener", m{"queue": i}, err)
|
||||||
|
}
|
||||||
|
udpServer.ReloadConfig(c)
|
||||||
|
udpConns[i] = udpServer
|
||||||
|
|
||||||
|
// If port is dynamic, discover it
|
||||||
|
if port == 0 {
|
||||||
|
uPort, err := udpServer.LocalAddr()
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewContextualError("Failed to get listening port", nil, err)
|
||||||
|
}
|
||||||
|
port = int(uPort.Port)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
udpServer.reloadConfig(config)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set up my internal host map
|
// Set up my internal host map
|
||||||
var preferredRanges []*net.IPNet
|
var preferredRanges []*net.IPNet
|
||||||
rawPreferredRanges := config.GetStringSlice("preferred_ranges", []string{})
|
rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
|
||||||
// First, check if 'preferred_ranges' is set and fallback to 'local_range'
|
// First, check if 'preferred_ranges' is set and fallback to 'local_range'
|
||||||
if len(rawPreferredRanges) > 0 {
|
if len(rawPreferredRanges) > 0 {
|
||||||
for _, rawPreferredRange := range rawPreferredRanges {
|
for _, rawPreferredRange := range rawPreferredRanges {
|
||||||
_, preferredRange, err := net.ParseCIDR(rawPreferredRange)
|
_, preferredRange, err := net.ParseCIDR(rawPreferredRange)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Failed to parse preferred ranges")
|
return nil, NewContextualError("Failed to parse preferred ranges", nil, err)
|
||||||
}
|
}
|
||||||
preferredRanges = append(preferredRanges, preferredRange)
|
preferredRanges = append(preferredRanges, preferredRange)
|
||||||
}
|
}
|
||||||
@@ -148,11 +223,11 @@ func Main(configPath string, configTest bool, buildVersion string) {
|
|||||||
// local_range was superseded by preferred_ranges. If it is still present,
|
// local_range was superseded by preferred_ranges. If it is still present,
|
||||||
// merge the local_range setting into preferred_ranges. We will probably
|
// merge the local_range setting into preferred_ranges. We will probably
|
||||||
// deprecate local_range and remove in the future.
|
// deprecate local_range and remove in the future.
|
||||||
rawLocalRange := config.GetString("local_range", "")
|
rawLocalRange := c.GetString("local_range", "")
|
||||||
if rawLocalRange != "" {
|
if rawLocalRange != "" {
|
||||||
_, localRange, err := net.ParseCIDR(rawLocalRange)
|
_, localRange, err := net.ParseCIDR(rawLocalRange)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Failed to parse local range")
|
return nil, NewContextualError("Failed to parse local_range", nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if the entry for local_range was already specified in
|
// Check if the entry for local_range was already specified in
|
||||||
@@ -169,9 +244,10 @@ func Main(configPath string, configTest bool, buildVersion string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hostMap := NewHostMap("main", tunCidr, preferredRanges)
|
hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
|
||||||
hostMap.SetDefaultRoute(ip2int(net.ParseIP(config.GetString("default_route", "0.0.0.0"))))
|
|
||||||
hostMap.addUnsafeRoutes(&unsafeRoutes)
|
hostMap.addUnsafeRoutes(&unsafeRoutes)
|
||||||
|
hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)
|
||||||
|
|
||||||
l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created")
|
l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created")
|
||||||
|
|
||||||
@@ -180,98 +256,85 @@ func Main(configPath string, configTest bool, buildVersion string) {
|
|||||||
go hostMap.Promoter(config.GetInt("promoter.interval"))
|
go hostMap.Promoter(config.GetInt("promoter.interval"))
|
||||||
*/
|
*/
|
||||||
|
|
||||||
punchy := NewPunchyFromConfig(config)
|
punchy := NewPunchyFromConfig(c)
|
||||||
if punchy.Punch && !configTest {
|
if punchy.Punch && !configTest {
|
||||||
l.Info("UDP hole punching enabled")
|
l.Info("UDP hole punching enabled")
|
||||||
go hostMap.Punchy(udpServer)
|
go hostMap.Punchy(ctx, udpConns[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
port := config.GetInt("listen.port", 0)
|
amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
|
||||||
// If port is dynamic, discover it
|
|
||||||
if port == 0 && !configTest {
|
|
||||||
uPort, err := udpServer.LocalAddr()
|
|
||||||
if err != nil {
|
|
||||||
l.WithError(err).Fatal("Failed to get listening port")
|
|
||||||
}
|
|
||||||
port = int(uPort.Port)
|
|
||||||
}
|
|
||||||
|
|
||||||
amLighthouse := config.GetBool("lighthouse.am_lighthouse", false)
|
// fatal if am_lighthouse is enabled but we are using an ephemeral port
|
||||||
|
if amLighthouse && (c.GetInt("listen.port", 0) == 0) {
|
||||||
|
return nil, NewContextualError("lighthouse.am_lighthouse enabled on node but no port number is set in config", nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
// warn if am_lighthouse is enabled but upstream lighthouses exists
|
// warn if am_lighthouse is enabled but upstream lighthouses exists
|
||||||
rawLighthouseHosts := config.GetStringSlice("lighthouse.hosts", []string{})
|
rawLighthouseHosts := c.GetStringSlice("lighthouse.hosts", []string{})
|
||||||
if amLighthouse && len(rawLighthouseHosts) != 0 {
|
if amLighthouse && len(rawLighthouseHosts) != 0 {
|
||||||
l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
|
l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
|
||||||
}
|
}
|
||||||
|
|
||||||
lighthouseHosts := make([]uint32, len(rawLighthouseHosts))
|
lighthouseHosts := make([]iputil.VpnIp, len(rawLighthouseHosts))
|
||||||
for i, host := range rawLighthouseHosts {
|
for i, host := range rawLighthouseHosts {
|
||||||
ip := net.ParseIP(host)
|
ip := net.ParseIP(host)
|
||||||
if ip == nil {
|
if ip == nil {
|
||||||
l.WithField("host", host).Fatalf("Unable to parse lighthouse host entry %v", i+1)
|
return nil, NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, nil)
|
||||||
}
|
}
|
||||||
if !tunCidr.Contains(ip) {
|
if !tunCidr.Contains(ip) {
|
||||||
l.WithField("vpnIp", ip).WithField("network", tunCidr.String()).Fatalf("lighthouse host is not in our subnet, invalid")
|
return nil, NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": tunCidr.String()}, nil)
|
||||||
}
|
}
|
||||||
lighthouseHosts[i] = ip2int(ip)
|
lighthouseHosts[i] = iputil.Ip2VpnIp(ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
lightHouse := NewLightHouse(
|
lightHouse := NewLightHouse(
|
||||||
|
l,
|
||||||
amLighthouse,
|
amLighthouse,
|
||||||
ip2int(tunCidr.IP),
|
tunCidr,
|
||||||
lighthouseHosts,
|
lighthouseHosts,
|
||||||
//TODO: change to a duration
|
//TODO: change to a duration
|
||||||
config.GetInt("lighthouse.interval", 10),
|
c.GetInt("lighthouse.interval", 10),
|
||||||
port,
|
uint32(port),
|
||||||
udpServer,
|
udpConns[0],
|
||||||
punchy.Respond,
|
punchy.Respond,
|
||||||
punchy.Delay,
|
punchy.Delay,
|
||||||
|
c.GetBool("stats.lighthouse_metrics", false),
|
||||||
)
|
)
|
||||||
|
|
||||||
remoteAllowList, err := config.GetAllowList("lighthouse.remote_allow_list", false)
|
remoteAllowList, err := NewRemoteAllowListFromConfig(c, "lighthouse.remote_allow_list", "lighthouse.remote_allow_ranges")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Invalid lighthouse.remote_allow_list")
|
return nil, NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
|
||||||
}
|
}
|
||||||
lightHouse.SetRemoteAllowList(remoteAllowList)
|
lightHouse.SetRemoteAllowList(remoteAllowList)
|
||||||
|
|
||||||
localAllowList, err := config.GetAllowList("lighthouse.local_allow_list", true)
|
localAllowList, err := NewLocalAllowListFromConfig(c, "lighthouse.local_allow_list")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Invalid lighthouse.local_allow_list")
|
return nil, NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
|
||||||
}
|
}
|
||||||
lightHouse.SetLocalAllowList(localAllowList)
|
lightHouse.SetLocalAllowList(localAllowList)
|
||||||
|
|
||||||
//TODO: Move all of this inside functions in lighthouse.go
|
//TODO: Move all of this inside functions in lighthouse.go
|
||||||
for k, v := range config.GetMap("static_host_map", map[interface{}]interface{}{}) {
|
for k, v := range c.GetMap("static_host_map", map[interface{}]interface{}{}) {
|
||||||
vpnIp := net.ParseIP(fmt.Sprintf("%v", k))
|
ip := net.ParseIP(fmt.Sprintf("%v", k))
|
||||||
if !tunCidr.Contains(vpnIp) {
|
vpnIp := iputil.Ip2VpnIp(ip)
|
||||||
l.WithField("vpnIp", vpnIp).WithField("network", tunCidr.String()).Fatalf("static_host_map key is not in our subnet, invalid")
|
if !tunCidr.Contains(ip) {
|
||||||
|
return nil, NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": vpnIp, "network": tunCidr.String()}, nil)
|
||||||
}
|
}
|
||||||
vals, ok := v.([]interface{})
|
vals, ok := v.([]interface{})
|
||||||
if ok {
|
if ok {
|
||||||
for _, v := range vals {
|
for _, v := range vals {
|
||||||
parts := strings.Split(fmt.Sprintf("%v", v), ":")
|
ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
|
||||||
addr, err := net.ResolveIPAddr("ip", parts[0])
|
|
||||||
if err == nil {
|
|
||||||
ip := addr.IP
|
|
||||||
port, err := strconv.Atoi(parts[1])
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Fatalf("Static host address for %s could not be parsed: %s", vpnIp, v)
|
return nil, NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp}, err)
|
||||||
}
|
|
||||||
lightHouse.AddRemote(ip2int(vpnIp), NewUDPAddr(ip2int(ip), uint16(port)), true)
|
|
||||||
}
|
}
|
||||||
|
lightHouse.AddStaticRemote(vpnIp, udp.NewAddr(ip, port))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
//TODO: make this all a helper
|
ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
|
||||||
parts := strings.Split(fmt.Sprintf("%v", v), ":")
|
|
||||||
addr, err := net.ResolveIPAddr("ip", parts[0])
|
|
||||||
if err == nil {
|
|
||||||
ip := addr.IP
|
|
||||||
port, err := strconv.Atoi(parts[1])
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Fatalf("Static host address for %s could not be parsed: %s", vpnIp, v)
|
return nil, NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp}, err)
|
||||||
}
|
|
||||||
lightHouse.AddRemote(ip2int(vpnIp), NewUDPAddr(ip2int(ip), uint16(port)), true)
|
|
||||||
}
|
}
|
||||||
|
lightHouse.AddStaticRemote(vpnIp, udp.NewAddr(ip, port))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -280,36 +343,58 @@ func Main(configPath string, configTest bool, buildVersion string) {
|
|||||||
l.WithError(err).Error("Lighthouse unreachable")
|
l.WithError(err).Error("Lighthouse unreachable")
|
||||||
}
|
}
|
||||||
|
|
||||||
handshakeConfig := HandshakeConfig{
|
var messageMetrics *MessageMetrics
|
||||||
tryInterval: config.GetDuration("handshakes.try_interval", DefaultHandshakeTryInterval),
|
if c.GetBool("stats.message_metrics", false) {
|
||||||
retries: config.GetInt("handshakes.retries", DefaultHandshakeRetries),
|
messageMetrics = newMessageMetrics()
|
||||||
waitRotation: config.GetInt("handshakes.wait_rotation", DefaultHandshakeWaitRotation),
|
} else {
|
||||||
|
messageMetrics = newMessageMetricsOnlyRecvError()
|
||||||
}
|
}
|
||||||
|
|
||||||
handshakeManager := NewHandshakeManager(tunCidr, preferredRanges, hostMap, lightHouse, udpServer, handshakeConfig)
|
handshakeConfig := HandshakeConfig{
|
||||||
|
tryInterval: c.GetDuration("handshakes.try_interval", DefaultHandshakeTryInterval),
|
||||||
|
retries: c.GetInt("handshakes.retries", DefaultHandshakeRetries),
|
||||||
|
triggerBuffer: c.GetInt("handshakes.trigger_buffer", DefaultHandshakeTriggerBuffer),
|
||||||
|
|
||||||
//TODO: These will be reused for psk
|
messageMetrics: messageMetrics,
|
||||||
//handshakeMACKey := config.GetString("handshake_mac.key", "")
|
}
|
||||||
//handshakeAcceptedMACKeys := config.GetStringSlice("handshake_mac.accepted_keys", []string{})
|
|
||||||
|
|
||||||
serveDns := config.GetBool("lighthouse.serve_dns", false)
|
handshakeManager := NewHandshakeManager(l, tunCidr, preferredRanges, hostMap, lightHouse, udpConns[0], handshakeConfig)
|
||||||
checkInterval := config.GetInt("timers.connection_alive_interval", 5)
|
lightHouse.handshakeTrigger = handshakeManager.trigger
|
||||||
pendingDeletionInterval := config.GetInt("timers.pending_deletion_interval", 10)
|
|
||||||
|
serveDns := false
|
||||||
|
if c.GetBool("lighthouse.serve_dns", false) {
|
||||||
|
if c.GetBool("lighthouse.am_lighthouse", false) {
|
||||||
|
serveDns = true
|
||||||
|
} else {
|
||||||
|
l.Warn("DNS server refusing to run because this host is not a lighthouse.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
checkInterval := c.GetInt("timers.connection_alive_interval", 5)
|
||||||
|
pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
|
||||||
ifConfig := &InterfaceConfig{
|
ifConfig := &InterfaceConfig{
|
||||||
HostMap: hostMap,
|
HostMap: hostMap,
|
||||||
Inside: tun,
|
Inside: tun,
|
||||||
Outside: udpServer,
|
Outside: udpConns[0],
|
||||||
certState: cs,
|
certState: cs,
|
||||||
Cipher: config.GetString("cipher", "aes"),
|
Cipher: c.GetString("cipher", "aes"),
|
||||||
Firewall: fw,
|
Firewall: fw,
|
||||||
ServeDns: serveDns,
|
ServeDns: serveDns,
|
||||||
HandshakeManager: handshakeManager,
|
HandshakeManager: handshakeManager,
|
||||||
lightHouse: lightHouse,
|
lightHouse: lightHouse,
|
||||||
checkInterval: checkInterval,
|
checkInterval: checkInterval,
|
||||||
pendingDeletionInterval: pendingDeletionInterval,
|
pendingDeletionInterval: pendingDeletionInterval,
|
||||||
DropLocalBroadcast: config.GetBool("tun.drop_local_broadcast", false),
|
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
|
||||||
DropMulticast: config.GetBool("tun.drop_multicast", false),
|
DropMulticast: c.GetBool("tun.drop_multicast", false),
|
||||||
UDPBatchSize: config.GetInt("listen.batch", 64),
|
routines: routines,
|
||||||
|
MessageMetrics: messageMetrics,
|
||||||
|
version: buildVersion,
|
||||||
|
caPool: caPool,
|
||||||
|
disconnectInvalid: c.GetBool("pki.disconnect_invalid", false),
|
||||||
|
psk: psk,
|
||||||
|
|
||||||
|
ConntrackCacheTimeout: conntrackCacheTimeout,
|
||||||
|
l: l,
|
||||||
}
|
}
|
||||||
|
|
||||||
switch ifConfig.Cipher {
|
switch ifConfig.Cipher {
|
||||||
@@ -318,67 +403,49 @@ func Main(configPath string, configTest bool, buildVersion string) {
|
|||||||
case "chachapoly":
|
case "chachapoly":
|
||||||
noiseEndianness = binary.LittleEndian
|
noiseEndianness = binary.LittleEndian
|
||||||
default:
|
default:
|
||||||
l.Fatalf("Unknown cipher: %v", ifConfig.Cipher)
|
return nil, fmt.Errorf("unknown cipher: %v", ifConfig.Cipher)
|
||||||
}
|
}
|
||||||
|
|
||||||
var ifce *Interface
|
var ifce *Interface
|
||||||
if !configTest {
|
if !configTest {
|
||||||
ifce, err = NewInterface(ifConfig)
|
ifce, err = NewInterface(ctx, ifConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Failed to initialize interface")
|
return nil, fmt.Errorf("failed to initialize interface: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ifce.RegisterConfigChangeCallbacks(config)
|
// TODO: Better way to attach these, probably want a new interface in InterfaceConfig
|
||||||
|
// I don't want to make this initial commit too far-reaching though
|
||||||
|
ifce.writers = udpConns
|
||||||
|
|
||||||
go handshakeManager.Run(ifce)
|
ifce.RegisterConfigChangeCallbacks(c)
|
||||||
go lightHouse.LhUpdateWorker(ifce)
|
|
||||||
|
go handshakeManager.Run(ctx, ifce)
|
||||||
|
go lightHouse.LhUpdateWorker(ctx, ifce)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = startStats(config, configTest)
|
// TODO - stats third-party modules start uncancellable goroutines. Update those libs to accept
|
||||||
|
// a context so that they can exit when the context is Done.
|
||||||
|
statsStart, err := startStats(l, c, buildVersion, configTest)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Fatal("Failed to start stats emitter")
|
return nil, NewContextualError("Failed to start stats emitter", nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if configTest {
|
if configTest {
|
||||||
os.Exit(0)
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: check if we _should_ be emitting stats
|
//TODO: check if we _should_ be emitting stats
|
||||||
go ifce.emitStats(config.GetDuration("stats.interval", time.Second*10))
|
go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))
|
||||||
|
|
||||||
attachCommands(ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
|
attachCommands(l, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
|
||||||
ifce.Run(config.GetInt("tun.routines", 1), udpQueues, buildVersion)
|
|
||||||
|
|
||||||
// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
|
// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
|
||||||
|
var dnsStart func()
|
||||||
if amLighthouse && serveDns {
|
if amLighthouse && serveDns {
|
||||||
l.Debugln("Starting dns server")
|
l.Debugln("Starting dns server")
|
||||||
go dnsMain(hostMap, config)
|
dnsStart = dnsMain(l, hostMap, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Just sit here and be friendly, main thread.
|
return &Control{ifce, l, cancel, sshStart, statsStart, dnsStart}, nil
|
||||||
shutdownBlock(ifce)
|
|
||||||
}
|
|
||||||
|
|
||||||
func shutdownBlock(ifce *Interface) {
|
|
||||||
var sigChan = make(chan os.Signal)
|
|
||||||
signal.Notify(sigChan, syscall.SIGTERM)
|
|
||||||
signal.Notify(sigChan, syscall.SIGINT)
|
|
||||||
|
|
||||||
sig := <-sigChan
|
|
||||||
l.WithField("signal", sig).Info("Caught signal, shutting down")
|
|
||||||
|
|
||||||
//TODO: stop tun and udp routines, the lock on hostMap does effectively does that though
|
|
||||||
//TODO: this is probably better as a function in ConnectionManager or HostMap directly
|
|
||||||
ifce.hostMap.Lock()
|
|
||||||
for _, h := range ifce.hostMap.Hosts {
|
|
||||||
if h.ConnectionState.ready {
|
|
||||||
ifce.send(closeTunnel, 0, h.ConnectionState, h, h.remote, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
|
|
||||||
l.WithField("vpnIp", IntIp(h.hostId)).WithField("udpAddr", h.remote).
|
|
||||||
Debug("Sending close tunnel message")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ifce.hostMap.Unlock()
|
|
||||||
|
|
||||||
l.WithField("signal", sig).Info("Goodbye")
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
package nebula
|
|
||||||
100
message_metrics.go
Normal file
100
message_metrics.go
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/rcrowley/go-metrics"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
)
|
||||||
|
|
||||||
|
//TODO: this can probably move into the header package
|
||||||
|
|
||||||
|
type MessageMetrics struct {
|
||||||
|
rx [][]metrics.Counter
|
||||||
|
tx [][]metrics.Counter
|
||||||
|
|
||||||
|
rxUnknown metrics.Counter
|
||||||
|
txUnknown metrics.Counter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MessageMetrics) Rx(t header.MessageType, s header.MessageSubType, i int64) {
|
||||||
|
if m != nil {
|
||||||
|
if t >= 0 && int(t) < len(m.rx) && s >= 0 && int(s) < len(m.rx[t]) {
|
||||||
|
m.rx[t][s].Inc(i)
|
||||||
|
} else if m.rxUnknown != nil {
|
||||||
|
m.rxUnknown.Inc(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *MessageMetrics) Tx(t header.MessageType, s header.MessageSubType, i int64) {
|
||||||
|
if m != nil {
|
||||||
|
if t >= 0 && int(t) < len(m.tx) && s >= 0 && int(s) < len(m.tx[t]) {
|
||||||
|
m.tx[t][s].Inc(i)
|
||||||
|
} else if m.txUnknown != nil {
|
||||||
|
m.txUnknown.Inc(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMessageMetrics() *MessageMetrics {
|
||||||
|
gen := func(t string) [][]metrics.Counter {
|
||||||
|
return [][]metrics.Counter{
|
||||||
|
{
|
||||||
|
metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.handshake_ixpsk0", t), nil),
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
{metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.recv_error", t), nil)},
|
||||||
|
{metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.lighthouse", t), nil)},
|
||||||
|
{
|
||||||
|
metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.test_request", t), nil),
|
||||||
|
metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.test_response", t), nil),
|
||||||
|
},
|
||||||
|
{metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.close_tunnel", t), nil)},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &MessageMetrics{
|
||||||
|
rx: gen("rx"),
|
||||||
|
tx: gen("tx"),
|
||||||
|
|
||||||
|
rxUnknown: metrics.GetOrRegisterCounter("messages.rx.other", nil),
|
||||||
|
txUnknown: metrics.GetOrRegisterCounter("messages.tx.other", nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Historically we only recorded recv_error, so this is backwards compat
|
||||||
|
func newMessageMetricsOnlyRecvError() *MessageMetrics {
|
||||||
|
gen := func(t string) [][]metrics.Counter {
|
||||||
|
return [][]metrics.Counter{
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
{metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.recv_error", t), nil)},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &MessageMetrics{
|
||||||
|
rx: gen("rx"),
|
||||||
|
tx: gen("tx"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLighthouseMetrics() *MessageMetrics {
|
||||||
|
gen := func(t string) [][]metrics.Counter {
|
||||||
|
h := make([][]metrics.Counter, len(NebulaMeta_MessageType_name))
|
||||||
|
used := []NebulaMeta_MessageType{
|
||||||
|
NebulaMeta_HostQuery,
|
||||||
|
NebulaMeta_HostQueryReply,
|
||||||
|
NebulaMeta_HostUpdateNotification,
|
||||||
|
NebulaMeta_HostPunchNotification,
|
||||||
|
}
|
||||||
|
for _, i := range used {
|
||||||
|
h[i] = []metrics.Counter{metrics.GetOrRegisterCounter(fmt.Sprintf("lighthouse.%s.%s", t, i.String()), nil)}
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
return &MessageMetrics{
|
||||||
|
rx: gen("rx"),
|
||||||
|
tx: gen("tx"),
|
||||||
|
|
||||||
|
rxUnknown: metrics.GetOrRegisterCounter("lighthouse.rx.other", nil),
|
||||||
|
txUnknown: metrics.GetOrRegisterCounter("lighthouse.tx.other", nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
1680
nebula.pb.go
1680
nebula.pb.go
File diff suppressed because it is too large
Load Diff
14
nebula.proto
14
nebula.proto
@@ -1,6 +1,8 @@
|
|||||||
syntax = "proto3";
|
syntax = "proto3";
|
||||||
package nebula;
|
package nebula;
|
||||||
|
|
||||||
|
option go_package = "github.com/slackhq/nebula";
|
||||||
|
|
||||||
message NebulaMeta {
|
message NebulaMeta {
|
||||||
enum MessageType {
|
enum MessageType {
|
||||||
None = 0;
|
None = 0;
|
||||||
@@ -20,19 +22,23 @@ message NebulaMeta {
|
|||||||
NebulaMetaDetails Details = 2;
|
NebulaMetaDetails Details = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
message NebulaMetaDetails {
|
message NebulaMetaDetails {
|
||||||
|
|
||||||
uint32 VpnIp = 1;
|
uint32 VpnIp = 1;
|
||||||
repeated IpAndPort IpAndPorts = 2;
|
repeated Ip4AndPort Ip4AndPorts = 2;
|
||||||
|
repeated Ip6AndPort Ip6AndPorts = 4;
|
||||||
uint32 counter = 3;
|
uint32 counter = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message IpAndPort {
|
message Ip4AndPort {
|
||||||
uint32 Ip = 1;
|
uint32 Ip = 1;
|
||||||
uint32 Port = 2;
|
uint32 Port = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message Ip6AndPort {
|
||||||
|
uint64 Hi = 1;
|
||||||
|
uint64 Lo = 2;
|
||||||
|
uint32 Port = 3;
|
||||||
|
}
|
||||||
|
|
||||||
message NebulaPing {
|
message NebulaPing {
|
||||||
enum MessageType {
|
enum MessageType {
|
||||||
|
|||||||
1
noise.go
1
noise.go
@@ -22,7 +22,6 @@ type NebulaCipherState struct {
|
|||||||
|
|
||||||
func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState {
|
func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState {
|
||||||
return &NebulaCipherState{c: s.Cipher()}
|
return &NebulaCipherState{c: s.Cipher()}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
|
func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
|
||||||
|
|||||||
224
outside.go
224
outside.go
@@ -2,18 +2,18 @@ package nebula
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/flynn/noise"
|
"github.com/flynn/noise"
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
// "github.com/google/gopacket"
|
"github.com/slackhq/nebula/firewall"
|
||||||
// "github.com/google/gopacket/layers"
|
"github.com/slackhq/nebula/header"
|
||||||
// "encoding/binary"
|
"github.com/slackhq/nebula/iputil"
|
||||||
"errors"
|
"github.com/slackhq/nebula/udp"
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/net/ipv4"
|
"golang.org/x/net/ipv4"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -21,14 +21,14 @@ const (
|
|||||||
minFwPacketLen = 4
|
minFwPacketLen = 4
|
||||||
)
|
)
|
||||||
|
|
||||||
func (f *Interface) readOutsidePackets(addr *udpAddr, out []byte, packet []byte, header *Header, fwPacket *FirewallPacket, nb []byte) {
|
func (f *Interface) readOutsidePackets(addr *udp.Addr, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
err := header.Parse(packet)
|
err := h.Parse(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO: best if we return this and let caller log
|
// TODO: best if we return this and let caller log
|
||||||
// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
|
// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
|
||||||
// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
|
// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
|
||||||
if len(packet) > 1 {
|
if len(packet) > 1 {
|
||||||
l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", addr, err)
|
f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", addr, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -36,31 +36,32 @@ func (f *Interface) readOutsidePackets(addr *udpAddr, out []byte, packet []byte,
|
|||||||
//l.Error("in packet ", header, packet[HeaderLen:])
|
//l.Error("in packet ", header, packet[HeaderLen:])
|
||||||
|
|
||||||
// verify if we've seen this index before, otherwise respond to the handshake initiation
|
// verify if we've seen this index before, otherwise respond to the handshake initiation
|
||||||
hostinfo, err := f.hostMap.QueryIndex(header.RemoteIndex)
|
hostinfo, err := f.hostMap.QueryIndex(h.RemoteIndex)
|
||||||
|
|
||||||
var ci *ConnectionState
|
var ci *ConnectionState
|
||||||
if err == nil {
|
if err == nil {
|
||||||
ci = hostinfo.ConnectionState
|
ci = hostinfo.ConnectionState
|
||||||
}
|
}
|
||||||
|
|
||||||
switch header.Type {
|
switch h.Type {
|
||||||
case message:
|
case header.Message:
|
||||||
if !f.handleEncrypted(ci, addr, header) {
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.decryptToTun(hostinfo, header.MessageCounter, out, packet, fwPacket, nb)
|
f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache)
|
||||||
|
|
||||||
// Fallthrough to the bottom to record incoming traffic
|
// Fallthrough to the bottom to record incoming traffic
|
||||||
|
|
||||||
case lightHouse:
|
case header.LightHouse:
|
||||||
if !f.handleEncrypted(ci, addr, header) {
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := f.decrypt(hostinfo, header.MessageCounter, out, packet, header, nb)
|
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger().WithError(err).WithField("udpAddr", addr).
|
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
|
||||||
WithField("packet", packet).
|
WithField("packet", packet).
|
||||||
Error("Failed to decrypt lighthouse packet")
|
Error("Failed to decrypt lighthouse packet")
|
||||||
|
|
||||||
@@ -69,18 +70,19 @@ func (f *Interface) readOutsidePackets(addr *udpAddr, out []byte, packet []byte,
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.lightHouse.HandleRequest(addr, hostinfo.hostId, d, hostinfo.GetCert(), f)
|
lhf(addr, hostinfo.vpnIp, d, f)
|
||||||
|
|
||||||
// Fallthrough to the bottom to record incoming traffic
|
// Fallthrough to the bottom to record incoming traffic
|
||||||
|
|
||||||
case test:
|
case header.Test:
|
||||||
if !f.handleEncrypted(ci, addr, header) {
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := f.decrypt(hostinfo, header.MessageCounter, out, packet, header, nb)
|
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger().WithError(err).WithField("udpAddr", addr).
|
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
|
||||||
WithField("packet", packet).
|
WithField("packet", packet).
|
||||||
Error("Failed to decrypt test packet")
|
Error("Failed to decrypt test packet")
|
||||||
|
|
||||||
@@ -89,11 +91,11 @@ func (f *Interface) readOutsidePackets(addr *udpAddr, out []byte, packet []byte,
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if header.Subtype == testRequest {
|
if h.Subtype == header.TestRequest {
|
||||||
// This testRequest might be from TryPromoteBest, so we should roam
|
// This testRequest might be from TryPromoteBest, so we should roam
|
||||||
// to the new IP address before responding
|
// to the new IP address before responding
|
||||||
f.handleHostRoaming(hostinfo, addr)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
f.send(test, testReply, ci, hostinfo, hostinfo.remote, d, nb, out)
|
f.send(header.Test, header.TestReply, ci, hostinfo, hostinfo.remote, d, nb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallthrough to the bottom to record incoming traffic
|
// Fallthrough to the bottom to record incoming traffic
|
||||||
@@ -101,77 +103,87 @@ func (f *Interface) readOutsidePackets(addr *udpAddr, out []byte, packet []byte,
|
|||||||
// Non encrypted messages below here, they should not fall through to avoid tracking incoming traffic since they
|
// Non encrypted messages below here, they should not fall through to avoid tracking incoming traffic since they
|
||||||
// are unauthenticated
|
// are unauthenticated
|
||||||
|
|
||||||
case handshake:
|
case header.Handshake:
|
||||||
HandleIncomingHandshake(f, addr, packet, header, hostinfo)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
|
HandleIncomingHandshake(f, addr, packet, h, hostinfo)
|
||||||
return
|
return
|
||||||
|
|
||||||
case recvError:
|
case header.RecvError:
|
||||||
// TODO: Remove this with recv_error deprecation
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
f.handleRecvError(addr, header)
|
f.handleRecvError(addr, h)
|
||||||
return
|
return
|
||||||
|
|
||||||
case closeTunnel:
|
case header.CloseTunnel:
|
||||||
if !f.handleEncrypted(ci, addr, header) {
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.logger().WithField("udpAddr", addr).
|
hostinfo.logger(f.l).WithField("udpAddr", addr).
|
||||||
Info("Close tunnel received, tearing down.")
|
Info("Close tunnel received, tearing down.")
|
||||||
|
|
||||||
f.closeTunnel(hostinfo)
|
f.closeTunnel(hostinfo, false)
|
||||||
return
|
return
|
||||||
|
|
||||||
default:
|
default:
|
||||||
hostinfo.logger().Debugf("Unexpected packet received from %s", addr)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
|
hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", addr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.handleHostRoaming(hostinfo, addr)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
|
|
||||||
f.connectionManager.In(hostinfo.hostId)
|
f.connectionManager.In(hostinfo.vpnIp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) closeTunnel(hostInfo *HostInfo) {
|
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
|
||||||
|
func (f *Interface) closeTunnel(hostInfo *HostInfo, hasHostMapLock bool) {
|
||||||
//TODO: this would be better as a single function in ConnectionManager that handled locks appropriately
|
//TODO: this would be better as a single function in ConnectionManager that handled locks appropriately
|
||||||
f.connectionManager.ClearIP(hostInfo.hostId)
|
f.connectionManager.ClearIP(hostInfo.vpnIp)
|
||||||
f.connectionManager.ClearPendingDeletion(hostInfo.hostId)
|
f.connectionManager.ClearPendingDeletion(hostInfo.vpnIp)
|
||||||
f.lightHouse.DeleteVpnIP(hostInfo.hostId)
|
f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
|
||||||
f.hostMap.DeleteVpnIP(hostInfo.hostId)
|
|
||||||
f.hostMap.DeleteIndex(hostInfo.localIndexId)
|
if hasHostMapLock {
|
||||||
|
f.hostMap.unlockedDeleteHostInfo(hostInfo)
|
||||||
|
} else {
|
||||||
|
f.hostMap.DeleteHostInfo(hostInfo)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udpAddr) {
|
// sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
|
||||||
if hostDidRoam(hostinfo.remote, addr) {
|
func (f *Interface) sendCloseTunnel(h *HostInfo) {
|
||||||
if !f.lightHouse.remoteAllowList.Allow(udp2ipInt(addr)) {
|
f.send(header.CloseTunnel, 0, h.ConnectionState, h, h.remote, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
|
||||||
hostinfo.logger().WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming")
|
}
|
||||||
|
|
||||||
|
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udp.Addr) {
|
||||||
|
if !hostinfo.remote.Equals(addr) {
|
||||||
|
if !f.lightHouse.remoteAllowList.Allow(hostinfo.vpnIp, addr.IP) {
|
||||||
|
hostinfo.logger(f.l).WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !hostinfo.lastRoam.IsZero() && addr.Equals(hostinfo.lastRoamRemote) && time.Since(hostinfo.lastRoam) < RoamingSupressSeconds*time.Second {
|
if !hostinfo.lastRoam.IsZero() && addr.Equals(hostinfo.lastRoamRemote) && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
|
||||||
if l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
hostinfo.logger().WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
|
hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
|
||||||
Debugf("Supressing roam back to previous remote for %d seconds", RoamingSupressSeconds)
|
Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.logger().WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
|
hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
|
||||||
Info("Host roamed to new udp ip/port.")
|
Info("Host roamed to new udp ip/port.")
|
||||||
hostinfo.lastRoam = time.Now()
|
hostinfo.lastRoam = time.Now()
|
||||||
remoteCopy := *hostinfo.remote
|
remoteCopy := *hostinfo.remote
|
||||||
hostinfo.lastRoamRemote = &remoteCopy
|
hostinfo.lastRoamRemote = &remoteCopy
|
||||||
hostinfo.SetRemote(*addr)
|
hostinfo.SetRemote(addr)
|
||||||
if f.lightHouse.amLighthouse {
|
|
||||||
f.lightHouse.AddRemote(hostinfo.hostId, addr, false)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udpAddr, header *Header) bool {
|
func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udp.Addr, h *header.H) bool {
|
||||||
// If connectionstate exists and the replay protector allows, process packet
|
// If connectionstate exists and the replay protector allows, process packet
|
||||||
// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
|
// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
|
||||||
if ci == nil || !ci.window.Check(header.MessageCounter) {
|
if ci == nil || !ci.window.Check(f.l, h.MessageCounter) {
|
||||||
f.sendRecvError(addr, header.RemoteIndex)
|
f.sendRecvError(addr, h.RemoteIndex)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -179,7 +191,7 @@ func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udpAddr, header *
|
|||||||
}
|
}
|
||||||
|
|
||||||
// newPacket validates and parses the interesting bits for the firewall out of the ip and sub protocol headers
|
// newPacket validates and parses the interesting bits for the firewall out of the ip and sub protocol headers
|
||||||
func newPacket(data []byte, incoming bool, fp *FirewallPacket) error {
|
func newPacket(data []byte, incoming bool, fp *firewall.Packet) error {
|
||||||
// Do we at least have an ipv4 header worth of data?
|
// Do we at least have an ipv4 header worth of data?
|
||||||
if len(data) < ipv4.HeaderLen {
|
if len(data) < ipv4.HeaderLen {
|
||||||
return fmt.Errorf("packet is less than %v bytes", ipv4.HeaderLen)
|
return fmt.Errorf("packet is less than %v bytes", ipv4.HeaderLen)
|
||||||
@@ -207,7 +219,7 @@ func newPacket(data []byte, incoming bool, fp *FirewallPacket) error {
|
|||||||
|
|
||||||
// Accounting for a variable header length, do we have enough data for our src/dst tuples?
|
// Accounting for a variable header length, do we have enough data for our src/dst tuples?
|
||||||
minLen := ihl
|
minLen := ihl
|
||||||
if !fp.Fragment && fp.Protocol != fwProtoICMP {
|
if !fp.Fragment && fp.Protocol != firewall.ProtoICMP {
|
||||||
minLen += minFwPacketLen
|
minLen += minFwPacketLen
|
||||||
}
|
}
|
||||||
if len(data) < minLen {
|
if len(data) < minLen {
|
||||||
@@ -216,9 +228,9 @@ func newPacket(data []byte, incoming bool, fp *FirewallPacket) error {
|
|||||||
|
|
||||||
// Firewall packets are locally oriented
|
// Firewall packets are locally oriented
|
||||||
if incoming {
|
if incoming {
|
||||||
fp.RemoteIP = binary.BigEndian.Uint32(data[12:16])
|
fp.RemoteIP = iputil.Ip2VpnIp(data[12:16])
|
||||||
fp.LocalIP = binary.BigEndian.Uint32(data[16:20])
|
fp.LocalIP = iputil.Ip2VpnIp(data[16:20])
|
||||||
if fp.Fragment || fp.Protocol == fwProtoICMP {
|
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
|
||||||
fp.RemotePort = 0
|
fp.RemotePort = 0
|
||||||
fp.LocalPort = 0
|
fp.LocalPort = 0
|
||||||
} else {
|
} else {
|
||||||
@@ -226,9 +238,9 @@ func newPacket(data []byte, incoming bool, fp *FirewallPacket) error {
|
|||||||
fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
|
fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fp.LocalIP = binary.BigEndian.Uint32(data[12:16])
|
fp.LocalIP = iputil.Ip2VpnIp(data[12:16])
|
||||||
fp.RemoteIP = binary.BigEndian.Uint32(data[16:20])
|
fp.RemoteIP = iputil.Ip2VpnIp(data[16:20])
|
||||||
if fp.Fragment || fp.Protocol == fwProtoICMP {
|
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
|
||||||
fp.RemotePort = 0
|
fp.RemotePort = 0
|
||||||
fp.LocalPort = 0
|
fp.LocalPort = 0
|
||||||
} else {
|
} else {
|
||||||
@@ -240,15 +252,15 @@ func newPacket(data []byte, incoming bool, fp *FirewallPacket) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []byte, header *Header, nb []byte) ([]byte, error) {
|
func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []byte, h *header.H, nb []byte) ([]byte, error) {
|
||||||
var err error
|
var err error
|
||||||
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:HeaderLen], packet[HeaderLen:], mc, nb)
|
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], mc, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hostinfo.ConnectionState.window.Update(mc) {
|
if !hostinfo.ConnectionState.window.Update(f.l, mc) {
|
||||||
hostinfo.logger().WithField("header", header).
|
hostinfo.logger(f.l).WithField("header", h).
|
||||||
Debugln("dropping out of window packet")
|
Debugln("dropping out of window packet")
|
||||||
return nil, errors.New("out of window packet")
|
return nil, errors.New("out of window packet")
|
||||||
}
|
}
|
||||||
@@ -256,12 +268,12 @@ func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
|
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:HeaderLen], packet[HeaderLen:], messageCounter, nb)
|
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger().WithError(err).Error("Failed to decrypt packet")
|
hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
|
||||||
//TODO: maybe after build 64 is out? 06/14/2018 - NB
|
//TODO: maybe after build 64 is out? 06/14/2018 - NB
|
||||||
//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
|
//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
|
||||||
return
|
return
|
||||||
@@ -269,79 +281,81 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
|||||||
|
|
||||||
err = newPacket(out, true, fwPacket)
|
err = newPacket(out, true, fwPacket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger().WithError(err).WithField("packet", out).
|
hostinfo.logger(f.l).WithError(err).WithField("packet", out).
|
||||||
Warnf("Error while validating inbound packet")
|
Warnf("Error while validating inbound packet")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hostinfo.ConnectionState.window.Update(messageCounter) {
|
if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
|
||||||
hostinfo.logger().WithField("fwPacket", fwPacket).
|
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
||||||
Debugln("dropping out of window packet")
|
Debugln("dropping out of window packet")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.firewall.Drop(out, *fwPacket, true, hostinfo, trustedCAs) {
|
dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.caPool, localCache)
|
||||||
hostinfo.logger().WithField("fwPacket", fwPacket).
|
if dropReason != nil {
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
||||||
|
WithField("reason", dropReason).
|
||||||
Debugln("dropping inbound packet")
|
Debugln("dropping inbound packet")
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.connectionManager.In(hostinfo.hostId)
|
f.connectionManager.In(hostinfo.vpnIp)
|
||||||
err = f.inside.WriteRaw(out)
|
_, err = f.readers[q].Write(out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Failed to write to tun")
|
f.l.WithError(err).Error("Failed to write to tun")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendRecvError(endpoint *udpAddr, index uint32) {
|
func (f *Interface) sendRecvError(endpoint *udp.Addr, index uint32) {
|
||||||
f.metricTxRecvError.Inc(1)
|
f.messageMetrics.Tx(header.RecvError, 0, 1)
|
||||||
|
|
||||||
//TODO: this should be a signed message so we can trust that we should drop the index
|
//TODO: this should be a signed message so we can trust that we should drop the index
|
||||||
b := HeaderEncode(make([]byte, HeaderLen), Version, uint8(recvError), 0, index, 0)
|
b := header.Encode(make([]byte, header.Len), header.Version, header.RecvError, 0, index, 0)
|
||||||
f.outside.WriteTo(b, endpoint)
|
f.outside.WriteTo(b, endpoint)
|
||||||
if l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
l.WithField("index", index).
|
f.l.WithField("index", index).
|
||||||
WithField("udpAddr", endpoint).
|
WithField("udpAddr", endpoint).
|
||||||
Debug("Recv error sent")
|
Debug("Recv error sent")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) handleRecvError(addr *udpAddr, h *Header) {
|
func (f *Interface) handleRecvError(addr *udp.Addr, h *header.H) {
|
||||||
f.metricRxRecvError.Inc(1)
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("index", h.RemoteIndex).
|
||||||
// This flag is to stop caring about recv_error from old versions
|
|
||||||
// This should go away when the old version is gone from prod
|
|
||||||
if l.Level >= logrus.DebugLevel {
|
|
||||||
l.WithField("index", h.RemoteIndex).
|
|
||||||
WithField("udpAddr", addr).
|
WithField("udpAddr", addr).
|
||||||
Debug("Recv error received")
|
Debug("Recv error received")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// First, clean up in the pending hostmap
|
||||||
|
f.handshakeManager.pendingHostMap.DeleteReverseIndex(h.RemoteIndex)
|
||||||
|
|
||||||
hostinfo, err := f.hostMap.QueryReverseIndex(h.RemoteIndex)
|
hostinfo, err := f.hostMap.QueryReverseIndex(h.RemoteIndex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugln(err, ": ", h.RemoteIndex)
|
f.l.Debugln(err, ": ", h.RemoteIndex)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
hostinfo.Lock()
|
||||||
|
defer hostinfo.Unlock()
|
||||||
|
|
||||||
if !hostinfo.RecvErrorExceeded() {
|
if !hostinfo.RecvErrorExceeded() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if hostinfo.remote != nil && hostinfo.remote.String() != addr.String() {
|
if hostinfo.remote != nil && !hostinfo.remote.Equals(addr) {
|
||||||
l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
|
f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
id := hostinfo.localIndexId
|
|
||||||
host := hostinfo.hostId
|
|
||||||
// We delete this host from the main hostmap
|
// We delete this host from the main hostmap
|
||||||
f.hostMap.DeleteIndex(id)
|
f.hostMap.DeleteHostInfo(hostinfo)
|
||||||
f.hostMap.DeleteVpnIP(host)
|
|
||||||
// We also delete it from pending to allow for
|
// We also delete it from pending to allow for
|
||||||
// fast reconnect. We must null the connectionstate
|
// fast reconnect. We must null the connectionstate
|
||||||
// or a counter reuse may happen
|
// or a counter reuse may happen
|
||||||
hostinfo.ConnectionState = nil
|
hostinfo.ConnectionState = nil
|
||||||
f.handshakeManager.DeleteIndex(id)
|
f.handshakeManager.DeleteHostInfo(hostinfo)
|
||||||
f.handshakeManager.DeleteVpnIP(host)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -366,7 +380,7 @@ func (f *Interface) sendMeta(ci *ConnectionState, endpoint *net.UDPAddr, meta *N
|
|||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
|
|
||||||
func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte) (*cert.NebulaCertificate, error) {
|
func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte, caPool *cert.NebulaCAPool) (*cert.NebulaCertificate, error) {
|
||||||
pk := h.PeerStatic()
|
pk := h.PeerStatic()
|
||||||
|
|
||||||
if pk == nil {
|
if pk == nil {
|
||||||
@@ -395,7 +409,7 @@ func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte) (*ce
|
|||||||
}
|
}
|
||||||
|
|
||||||
c, _ := cert.UnmarshalNebulaCertificate(recombined)
|
c, _ := cert.UnmarshalNebulaCertificate(recombined)
|
||||||
isValid, err := c.Verify(time.Now(), trustedCAs)
|
isValid, err := c.Verify(time.Now(), caPool)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return c, fmt.Errorf("certificate validation failed: %s", err)
|
return c, fmt.Errorf("certificate validation failed: %s", err)
|
||||||
} else if !isValid {
|
} else if !isValid {
|
||||||
|
|||||||
@@ -1,14 +1,17 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"golang.org/x/net/ipv4"
|
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/firewall"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"golang.org/x/net/ipv4"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_newPacket(t *testing.T) {
|
func Test_newPacket(t *testing.T) {
|
||||||
p := &FirewallPacket{}
|
p := &firewall.Packet{}
|
||||||
|
|
||||||
// length fail
|
// length fail
|
||||||
err := newPacket([]byte{0, 1}, true, p)
|
err := newPacket([]byte{0, 1}, true, p)
|
||||||
@@ -43,7 +46,7 @@ func Test_newPacket(t *testing.T) {
|
|||||||
Src: net.IPv4(10, 0, 0, 1),
|
Src: net.IPv4(10, 0, 0, 1),
|
||||||
Dst: net.IPv4(10, 0, 0, 2),
|
Dst: net.IPv4(10, 0, 0, 2),
|
||||||
Options: []byte{0, 1, 0, 2},
|
Options: []byte{0, 1, 0, 2},
|
||||||
Protocol: fwProtoTCP,
|
Protocol: firewall.ProtoTCP,
|
||||||
}
|
}
|
||||||
|
|
||||||
b, _ = h.Marshal()
|
b, _ = h.Marshal()
|
||||||
@@ -51,9 +54,9 @@ func Test_newPacket(t *testing.T) {
|
|||||||
err = newPacket(b, true, p)
|
err = newPacket(b, true, p)
|
||||||
|
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, p.Protocol, uint8(fwProtoTCP))
|
assert.Equal(t, p.Protocol, uint8(firewall.ProtoTCP))
|
||||||
assert.Equal(t, p.LocalIP, ip2int(net.IPv4(10, 0, 0, 2)))
|
assert.Equal(t, p.LocalIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2)))
|
||||||
assert.Equal(t, p.RemoteIP, ip2int(net.IPv4(10, 0, 0, 1)))
|
assert.Equal(t, p.RemoteIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1)))
|
||||||
assert.Equal(t, p.RemotePort, uint16(3))
|
assert.Equal(t, p.RemotePort, uint16(3))
|
||||||
assert.Equal(t, p.LocalPort, uint16(4))
|
assert.Equal(t, p.LocalPort, uint16(4))
|
||||||
|
|
||||||
@@ -73,8 +76,8 @@ func Test_newPacket(t *testing.T) {
|
|||||||
|
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, p.Protocol, uint8(2))
|
assert.Equal(t, p.Protocol, uint8(2))
|
||||||
assert.Equal(t, p.LocalIP, ip2int(net.IPv4(10, 0, 0, 1)))
|
assert.Equal(t, p.LocalIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1)))
|
||||||
assert.Equal(t, p.RemoteIP, ip2int(net.IPv4(10, 0, 0, 2)))
|
assert.Equal(t, p.RemoteIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2)))
|
||||||
assert.Equal(t, p.RemotePort, uint16(6))
|
assert.Equal(t, p.RemotePort, uint16(6))
|
||||||
assert.Equal(t, p.LocalPort, uint16(5))
|
assert.Equal(t, p.LocalPort, uint16(5))
|
||||||
}
|
}
|
||||||
|
|||||||
183
psk.go
Normal file
183
psk.go
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"golang.org/x/crypto/hkdf"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrNotAPskMode = errors.New("not a psk mode")
|
||||||
|
var ErrKeyTooShort = errors.New("key is too short")
|
||||||
|
var ErrNotEnoughPskKeys = errors.New("at least 1 key is required")
|
||||||
|
|
||||||
|
// The minimum length that we accept for a user defined psk, the choice is arbitrary
|
||||||
|
const MinPskLength = 8
|
||||||
|
|
||||||
|
type PskMode int
|
||||||
|
|
||||||
|
func (p PskMode) String() string {
|
||||||
|
switch p {
|
||||||
|
case PskNone:
|
||||||
|
return "none"
|
||||||
|
case PskTransitionalAccepting:
|
||||||
|
return "transitional-accepting"
|
||||||
|
case PskTransitionalSending:
|
||||||
|
return "transitional-sending"
|
||||||
|
case PskEnforced:
|
||||||
|
return "enforced"
|
||||||
|
}
|
||||||
|
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPskMode(m string) (PskMode, error) {
|
||||||
|
switch m {
|
||||||
|
case "none":
|
||||||
|
return PskNone, nil
|
||||||
|
case "transitional-accepting":
|
||||||
|
return PskTransitionalAccepting, nil
|
||||||
|
case "transitional-sending":
|
||||||
|
return PskTransitionalSending, nil
|
||||||
|
case "enforced":
|
||||||
|
return PskEnforced, nil
|
||||||
|
}
|
||||||
|
return PskNone, ErrNotAPskMode
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
PskNone PskMode = 0
|
||||||
|
PskTransitionalAccepting PskMode = 1
|
||||||
|
PskTransitionalSending PskMode = 2
|
||||||
|
PskEnforced PskMode = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
type Psk struct {
|
||||||
|
// pskMode sets how psk works, ignored, allowed for incoming, or enforced for all
|
||||||
|
mode PskMode
|
||||||
|
|
||||||
|
// Cache holds all pre-computed psk hkdfs
|
||||||
|
// Handshakes iterate this directly
|
||||||
|
Cache [][]byte
|
||||||
|
|
||||||
|
// The key has already been extracted and is ready to be expanded for use
|
||||||
|
// MakeFor does the final expand step mixing in the intended recipients vpn ip
|
||||||
|
key []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPskFromConfig is a helper for initial boot and config reloading.
|
||||||
|
func NewPskFromConfig(c *config.C, myVpnIp iputil.VpnIp) (*Psk, error) {
|
||||||
|
sMode := c.GetString("handshakes.psk.mode", "none")
|
||||||
|
mode, err := NewPskMode(sMode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewContextualError("Could not parse handshakes.psk.mode", m{"mode": mode}, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewPsk(
|
||||||
|
mode,
|
||||||
|
c.GetStringSlice("handshakes.psk.keys", nil),
|
||||||
|
myVpnIp,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPsk creates a new Psk object and handles the caching of all accepted keys and preparation of the primary key
|
||||||
|
func NewPsk(mode PskMode, keys []string, myVpnIp iputil.VpnIp) (*Psk, error) {
|
||||||
|
psk := &Psk{
|
||||||
|
mode: mode,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := psk.preparePrimaryKey(keys)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = psk.cachePsks(myVpnIp, keys)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return psk, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeFor if we are in enforced mode, the final hkdf expand stage is done on the pre extracted primary key,
|
||||||
|
// mixing in the intended recipients vpn ip and the result is returned.
|
||||||
|
// If we are transitional or not using psks, an empty byte slice is returned
|
||||||
|
func (p *Psk) MakeFor(vpnIp iputil.VpnIp) ([]byte, error) {
|
||||||
|
if p.mode == PskNone || p.mode == PskTransitionalAccepting {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
hmacKey := make([]byte, sha256.Size)
|
||||||
|
_, err := io.ReadFull(hkdf.Expand(sha256.New, p.key, vpnIp.ToIP()), hmacKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hmacKey, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cachePsks generates all psks we accept and caches them to speed up handshaking
|
||||||
|
func (p *Psk) cachePsks(myVpnIp iputil.VpnIp, keys []string) error {
|
||||||
|
// If PskNone is set then we are using the nil byte array for a psk, we can return
|
||||||
|
if p.mode == PskNone {
|
||||||
|
p.Cache = [][]byte{nil}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(keys) < 1 {
|
||||||
|
return ErrNotEnoughPskKeys
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Cache = [][]byte{}
|
||||||
|
|
||||||
|
if p.mode == PskTransitionalAccepting || p.mode == PskTransitionalSending {
|
||||||
|
// We are transitional, we accept empty psks
|
||||||
|
p.Cache = append(p.Cache, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We are either PskAuto or PskTransitional, build all possibilities
|
||||||
|
for i, rk := range keys {
|
||||||
|
k, err := sha256KdfFromString(rk, myVpnIp)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate key for position %v: %w", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Cache = append(p.Cache, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// preparePrimaryKey if we are in enforced mode, will do an hkdf extract on the first key to benefit
|
||||||
|
// outgoing handshake performance, MakeFor does the final expand step
|
||||||
|
func (p *Psk) preparePrimaryKey(keys []string) error {
|
||||||
|
if p.mode == PskNone || p.mode == PskTransitionalAccepting {
|
||||||
|
// If we aren't enforcing then there is nothing to prepare
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(keys) < 1 {
|
||||||
|
return ErrNotEnoughPskKeys
|
||||||
|
}
|
||||||
|
|
||||||
|
p.key = hkdf.Extract(sha256.New, []byte(keys[0]), nil)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sha256KdfFromString generates a full hkdf
|
||||||
|
func sha256KdfFromString(secret string, vpnIp iputil.VpnIp) ([]byte, error) {
|
||||||
|
if len(secret) < MinPskLength {
|
||||||
|
return nil, ErrKeyTooShort
|
||||||
|
}
|
||||||
|
|
||||||
|
hmacKey := make([]byte, sha256.Size)
|
||||||
|
_, err := io.ReadFull(hkdf.New(sha256.New, []byte(secret), nil, vpnIp.ToIP()), hmacKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return hmacKey, nil
|
||||||
|
}
|
||||||
103
psk_test.go
Normal file
103
psk_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewPsk(t *testing.T) {
|
||||||
|
t.Run("mode none", func(t *testing.T) {
|
||||||
|
p, err := NewPsk(PskNone, nil, 1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, PskNone, p.mode)
|
||||||
|
assert.Empty(t, p.key)
|
||||||
|
|
||||||
|
assert.Len(t, p.Cache, 1)
|
||||||
|
assert.Nil(t, p.Cache[0])
|
||||||
|
|
||||||
|
b, err := p.MakeFor(0)
|
||||||
|
assert.Equal(t, []byte{}, b)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("mode transitional-accepting", func(t *testing.T) {
|
||||||
|
p, err := NewPsk(PskTransitionalAccepting, nil, 1)
|
||||||
|
assert.Error(t, ErrNotEnoughPskKeys, err)
|
||||||
|
|
||||||
|
p, err = NewPsk(PskTransitionalAccepting, []string{"1234567"}, 1)
|
||||||
|
assert.Error(t, ErrKeyTooShort)
|
||||||
|
|
||||||
|
p, err = NewPsk(PskTransitionalAccepting, []string{"hi there friends"}, 1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, PskTransitionalAccepting, p.mode)
|
||||||
|
assert.Empty(t, p.key)
|
||||||
|
|
||||||
|
assert.Len(t, p.Cache, 2)
|
||||||
|
assert.Nil(t, p.Cache[0])
|
||||||
|
|
||||||
|
expectedCache := []byte{146, 120, 135, 31, 158, 102, 45, 189, 128, 190, 37, 101, 58, 254, 6, 166, 91, 209, 148, 131, 27, 193, 24, 25, 170, 65, 130, 189, 7, 179, 255, 17}
|
||||||
|
assert.Equal(t, expectedCache, p.Cache[1])
|
||||||
|
|
||||||
|
b, err := p.MakeFor(0)
|
||||||
|
assert.Equal(t, []byte{}, b)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("mode transitional-sending", func(t *testing.T) {
|
||||||
|
p, err := NewPsk(PskTransitionalSending, nil, 1)
|
||||||
|
assert.Error(t, ErrNotEnoughPskKeys, err)
|
||||||
|
|
||||||
|
p, err = NewPsk(PskTransitionalSending, []string{"1234567"}, 1)
|
||||||
|
assert.Error(t, ErrKeyTooShort)
|
||||||
|
|
||||||
|
p, err = NewPsk(PskTransitionalSending, []string{"hi there friends"}, 1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, PskTransitionalSending, p.mode)
|
||||||
|
|
||||||
|
expectedKey := []byte{0x9c, 0x67, 0xab, 0x58, 0x79, 0x5c, 0x8a, 0xf0, 0xaa, 0xf0, 0x4c, 0x6c, 0x9a, 0x42, 0x6b, 0xe, 0xe2, 0x94, 0xb1, 0x0, 0x28, 0x1c, 0xdc, 0x88, 0x44, 0x35, 0x3f, 0xb7, 0xd5, 0x9, 0xc0, 0xda}
|
||||||
|
assert.Equal(t, expectedKey, p.key)
|
||||||
|
|
||||||
|
assert.Len(t, p.Cache, 2)
|
||||||
|
assert.Nil(t, p.Cache[0])
|
||||||
|
|
||||||
|
expectedCache := []byte{146, 120, 135, 31, 158, 102, 45, 189, 128, 190, 37, 101, 58, 254, 6, 166, 91, 209, 148, 131, 27, 193, 24, 25, 170, 65, 130, 189, 7, 179, 255, 17}
|
||||||
|
assert.Equal(t, expectedCache, p.Cache[1])
|
||||||
|
|
||||||
|
expectedPsk := []byte{0xd9, 0x16, 0xa3, 0x66, 0x6a, 0x20, 0x26, 0xcf, 0x5d, 0x93, 0xad, 0xa3, 0x88, 0x2d, 0x57, 0xac, 0x9b, 0xc3, 0x5a, 0xb7, 0x8f, 0x6, 0x71, 0xc4, 0x3e, 0x5, 0x9e, 0xbc, 0x4e, 0xc8, 0x24, 0x17}
|
||||||
|
b, err := p.MakeFor(0)
|
||||||
|
assert.Equal(t, expectedPsk, b)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("mode enforced", func(t *testing.T) {
|
||||||
|
p, err := NewPsk(PskEnforced, nil, 1)
|
||||||
|
assert.Error(t, ErrNotEnoughPskKeys, err)
|
||||||
|
|
||||||
|
p, err = NewPsk(PskEnforced, []string{"hi there friends"}, 1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, PskEnforced, p.mode)
|
||||||
|
|
||||||
|
expectedKey := []byte{156, 103, 171, 88, 121, 92, 138, 240, 170, 240, 76, 108, 154, 66, 107, 14, 226, 148, 177, 0, 40, 28, 220, 136, 68, 53, 63, 183, 213, 9, 192, 218}
|
||||||
|
assert.Equal(t, expectedKey, p.key)
|
||||||
|
|
||||||
|
assert.Len(t, p.Cache, 1)
|
||||||
|
expectedCache := []byte{146, 120, 135, 31, 158, 102, 45, 189, 128, 190, 37, 101, 58, 254, 6, 166, 91, 209, 148, 131, 27, 193, 24, 25, 170, 65, 130, 189, 7, 179, 255, 17}
|
||||||
|
assert.Equal(t, expectedCache, p.Cache[0])
|
||||||
|
|
||||||
|
expectedPsk := []byte{0xd9, 0x16, 0xa3, 0x66, 0x6a, 0x20, 0x26, 0xcf, 0x5d, 0x93, 0xad, 0xa3, 0x88, 0x2d, 0x57, 0xac, 0x9b, 0xc3, 0x5a, 0xb7, 0x8f, 0x6, 0x71, 0xc4, 0x3e, 0x5, 0x9e, 0xbc, 0x4e, 0xc8, 0x24, 0x17}
|
||||||
|
b, err := p.MakeFor(0)
|
||||||
|
assert.Equal(t, expectedPsk, b)
|
||||||
|
|
||||||
|
// Make sure different vpn ips generate different psks
|
||||||
|
expectedPsk = []byte{0x92, 0x78, 0x87, 0x1f, 0x9e, 0x66, 0x2d, 0xbd, 0x80, 0xbe, 0x25, 0x65, 0x3a, 0xfe, 0x6, 0xa6, 0x5b, 0xd1, 0x94, 0x83, 0x1b, 0xc1, 0x18, 0x19, 0xaa, 0x41, 0x82, 0xbd, 0x7, 0xb3, 0xff, 0x11}
|
||||||
|
b, err = p.MakeFor(1)
|
||||||
|
assert.Equal(t, expectedPsk, b)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkPsk_MakeFor(b *testing.B) {
|
||||||
|
p, err := NewPsk(PskEnforced, []string{"hi there friends"}, 1)
|
||||||
|
assert.NoError(b, err)
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
p.MakeFor(99)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,6 +1,10 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import "time"
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
)
|
||||||
|
|
||||||
type Punchy struct {
|
type Punchy struct {
|
||||||
Punch bool
|
Punch bool
|
||||||
@@ -8,7 +12,7 @@ type Punchy struct {
|
|||||||
Delay time.Duration
|
Delay time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPunchyFromConfig(c *Config) *Punchy {
|
func NewPunchyFromConfig(c *config.C) *Punchy {
|
||||||
p := &Punchy{}
|
p := &Punchy{}
|
||||||
|
|
||||||
if c.IsSet("punchy.punch") {
|
if c.IsSet("punchy.punch") {
|
||||||
|
|||||||
@@ -1,13 +1,17 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/util"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewPunchyFromConfig(t *testing.T) {
|
func TestNewPunchyFromConfig(t *testing.T) {
|
||||||
c := NewConfig()
|
l := util.NewTestLogger()
|
||||||
|
c := config.NewC(l)
|
||||||
|
|
||||||
// Test defaults
|
// Test defaults
|
||||||
p := NewPunchyFromConfig(c)
|
p := NewPunchyFromConfig(c)
|
||||||
|
|||||||
507
remote_list.go
Normal file
507
remote_list.go
Normal file
@@ -0,0 +1,507 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"net"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// forEachFunc is used to benefit folks that want to do work inside the lock
|
||||||
|
type forEachFunc func(addr *udp.Addr, preferred bool)
|
||||||
|
|
||||||
|
// The checkFuncs here are to simplify bulk importing LH query response logic into a single function (reset slice and iterate)
|
||||||
|
type checkFuncV4 func(vpnIp iputil.VpnIp, to *Ip4AndPort) bool
|
||||||
|
type checkFuncV6 func(vpnIp iputil.VpnIp, to *Ip6AndPort) bool
|
||||||
|
|
||||||
|
// CacheMap is a struct that better represents the lighthouse cache for humans
|
||||||
|
// The string key is the owners vpnIp
|
||||||
|
type CacheMap map[string]*Cache
|
||||||
|
|
||||||
|
// Cache is the other part of CacheMap to better represent the lighthouse cache for humans
|
||||||
|
// We don't reason about ipv4 vs ipv6 here
|
||||||
|
type Cache struct {
|
||||||
|
Learned []*udp.Addr `json:"learned,omitempty"`
|
||||||
|
Reported []*udp.Addr `json:"reported,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: Seems like we should plop static host entries in here too since the are protected by the lighthouse from deletion
// We will never clean learned/reported information for them as it stands today

// cache is an internal struct that splits v4 and v6 addresses inside the cache map
type cache struct {
	v4 *cacheV4 // nil until an ipv4 record is stored (see unlockedGetOrMakeV4)
	v6 *cacheV6 // nil until an ipv6 record is stored (see unlockedGetOrMakeV6)
}

// cacheV4 stores learned and reported ipv4 records under cache
type cacheV4 struct {
	learned  *Ip4AndPort   // single address learned directly from the owner
	reported []*Ip4AndPort // addresses reported on the owner's behalf
}

// cacheV6 stores learned and reported ipv6 records under cache
type cacheV6 struct {
	learned  *Ip6AndPort   // single address learned directly from the owner
	reported []*Ip6AndPort // addresses reported on the owner's behalf
}
|
||||||
|
|
||||||
|
// RemoteList is a unifying concept for lighthouse servers and clients as well as hostinfos.
// It serves as a local cache of query replies, host update notifications, and locally learned addresses
type RemoteList struct {
	// Every interaction with internals requires a lock!
	sync.RWMutex

	// A deduplicated set of addresses. Any accessor should lock beforehand.
	addrs []*udp.Addr

	// These are maps to store v4 and v6 addresses per lighthouse
	// Map key is the vpnIp of the person that told us about this the cached entries underneath.
	// For learned addresses, this is the vpnIp that sent the packet
	cache map[iputil.VpnIp]*cache

	// This is a list of remotes that we have tried to handshake with and have returned from the wrong vpn ip.
	// They should not be tried again during a handshake
	badRemotes []*udp.Addr

	// A flag that the cache may have changed and addrs needs to be rebuilt
	shouldRebuild bool
}
|
||||||
|
|
||||||
|
// NewRemoteList creates a new empty RemoteList
|
||||||
|
func NewRemoteList() *RemoteList {
|
||||||
|
return &RemoteList{
|
||||||
|
addrs: make([]*udp.Addr, 0),
|
||||||
|
cache: make(map[iputil.VpnIp]*cache),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len locks and reports the size of the deduplicated address list
|
||||||
|
// The deduplication work may need to occur here, so you must pass preferredRanges
|
||||||
|
func (r *RemoteList) Len(preferredRanges []*net.IPNet) int {
|
||||||
|
r.Rebuild(preferredRanges)
|
||||||
|
r.RLock()
|
||||||
|
defer r.RUnlock()
|
||||||
|
return len(r.addrs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForEach locks and will call the forEachFunc for every deduplicated address in the list
|
||||||
|
// The deduplication work may need to occur here, so you must pass preferredRanges
|
||||||
|
func (r *RemoteList) ForEach(preferredRanges []*net.IPNet, forEach forEachFunc) {
|
||||||
|
r.Rebuild(preferredRanges)
|
||||||
|
r.RLock()
|
||||||
|
for _, v := range r.addrs {
|
||||||
|
forEach(v, isPreferred(v.IP, preferredRanges))
|
||||||
|
}
|
||||||
|
r.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyAddrs locks and makes a deep copy of the deduplicated address list
|
||||||
|
// The deduplication work may need to occur here, so you must pass preferredRanges
|
||||||
|
func (r *RemoteList) CopyAddrs(preferredRanges []*net.IPNet) []*udp.Addr {
|
||||||
|
if r == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Rebuild(preferredRanges)
|
||||||
|
|
||||||
|
r.RLock()
|
||||||
|
defer r.RUnlock()
|
||||||
|
c := make([]*udp.Addr, len(r.addrs))
|
||||||
|
for i, v := range r.addrs {
|
||||||
|
c[i] = v.Copy()
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// LearnRemote locks and sets the learned slot for the owner vpn ip to the provided addr
|
||||||
|
// Currently this is only needed when HostInfo.SetRemote is called as that should cover both handshaking and roaming.
|
||||||
|
// It will mark the deduplicated address list as dirty, so do not call it unless new information is available
|
||||||
|
//TODO: this needs to support the allow list list
|
||||||
|
func (r *RemoteList) LearnRemote(ownerVpnIp iputil.VpnIp, addr *udp.Addr) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
if v4 := addr.IP.To4(); v4 != nil {
|
||||||
|
r.unlockedSetLearnedV4(ownerVpnIp, NewIp4AndPort(v4, uint32(addr.Port)))
|
||||||
|
} else {
|
||||||
|
r.unlockedSetLearnedV6(ownerVpnIp, NewIp6AndPort(addr.IP, uint32(addr.Port)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyCache locks and creates a more human friendly form of the internal address cache.
// This may contain duplicates and blocked addresses
func (r *RemoteList) CopyCache() *CacheMap {
	r.RLock()
	defer r.RUnlock()

	cm := make(CacheMap)
	// getOrMake returns the Cache for vpnIp, creating an empty one on first use
	getOrMake := func(vpnIp string) *Cache {
		c := cm[vpnIp]
		if c == nil {
			c = &Cache{
				Learned:  make([]*udp.Addr, 0),
				Reported: make([]*udp.Addr, 0),
			}
			cm[vpnIp] = c
		}
		return c
	}

	// Flatten the internal v4/v6 split back into per-owner udp.Addr lists
	for owner, mc := range r.cache {
		c := getOrMake(owner.String())

		if mc.v4 != nil {
			if mc.v4.learned != nil {
				c.Learned = append(c.Learned, NewUDPAddrFromLH4(mc.v4.learned))
			}

			for _, a := range mc.v4.reported {
				c.Reported = append(c.Reported, NewUDPAddrFromLH4(a))
			}
		}

		if mc.v6 != nil {
			if mc.v6.learned != nil {
				c.Learned = append(c.Learned, NewUDPAddrFromLH6(mc.v6.learned))
			}

			for _, a := range mc.v6.reported {
				c.Reported = append(c.Reported, NewUDPAddrFromLH6(a))
			}
		}
	}

	return &cm
}
|
||||||
|
|
||||||
|
// BlockRemote locks and records the address as bad, it will be excluded from the deduplicated address list
|
||||||
|
func (r *RemoteList) BlockRemote(bad *udp.Addr) {
|
||||||
|
r.Lock()
|
||||||
|
defer r.Unlock()
|
||||||
|
|
||||||
|
// Check if we already blocked this addr
|
||||||
|
if r.unlockedIsBad(bad) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// We copy here because we are taking something else's memory and we can't trust everything
|
||||||
|
r.badRemotes = append(r.badRemotes, bad.Copy())
|
||||||
|
|
||||||
|
// Mark the next interaction must recollect/dedupe
|
||||||
|
r.shouldRebuild = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyBlockedRemotes locks and makes a deep copy of the blocked remotes list
|
||||||
|
func (r *RemoteList) CopyBlockedRemotes() []*udp.Addr {
|
||||||
|
r.RLock()
|
||||||
|
defer r.RUnlock()
|
||||||
|
|
||||||
|
c := make([]*udp.Addr, len(r.badRemotes))
|
||||||
|
for i, v := range r.badRemotes {
|
||||||
|
c[i] = v.Copy()
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResetBlockedRemotes locks and clears the blocked remotes list
|
||||||
|
func (r *RemoteList) ResetBlockedRemotes() {
|
||||||
|
r.Lock()
|
||||||
|
r.badRemotes = nil
|
||||||
|
r.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rebuild locks and generates the deduplicated address list only if there is work to be done
// There is generally no reason to call this directly but it is safe to do so
func (r *RemoteList) Rebuild(preferredRanges []*net.IPNet) {
	r.Lock()
	defer r.Unlock()

	// Only rebuild if the cache changed
	//TODO: shouldRebuild is probably pointless as we don't check for actual change when lighthouse updates come in
	if r.shouldRebuild {
		r.unlockedCollect()
		r.shouldRebuild = false
	}

	// Always re-sort, preferredRanges can change via HUP
	r.unlockedSort(preferredRanges)
}
|
||||||
|
|
||||||
|
// unlockedIsBad assumes you have the write lock and checks if the remote matches any entry in the blocked address list
|
||||||
|
func (r *RemoteList) unlockedIsBad(remote *udp.Addr) bool {
|
||||||
|
for _, v := range r.badRemotes {
|
||||||
|
if v.Equals(remote) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedSetLearnedV4 assumes you have the write lock and sets the current learned address for this owner and marks the
|
||||||
|
// deduplicated address list as dirty
|
||||||
|
func (r *RemoteList) unlockedSetLearnedV4(ownerVpnIp iputil.VpnIp, to *Ip4AndPort) {
|
||||||
|
r.shouldRebuild = true
|
||||||
|
r.unlockedGetOrMakeV4(ownerVpnIp).learned = to
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedSetV4 assumes you have the write lock and resets the reported list of ips for this owner to the list provided
|
||||||
|
// and marks the deduplicated address list as dirty
|
||||||
|
func (r *RemoteList) unlockedSetV4(ownerVpnIp iputil.VpnIp, vpnIp iputil.VpnIp, to []*Ip4AndPort, check checkFuncV4) {
|
||||||
|
r.shouldRebuild = true
|
||||||
|
c := r.unlockedGetOrMakeV4(ownerVpnIp)
|
||||||
|
|
||||||
|
// Reset the slice
|
||||||
|
c.reported = c.reported[:0]
|
||||||
|
|
||||||
|
// We can't take their array but we can take their pointers
|
||||||
|
for _, v := range to[:minInt(len(to), MaxRemotes)] {
|
||||||
|
if check(vpnIp, v) {
|
||||||
|
c.reported = append(c.reported, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedPrependV4 assumes you have the write lock and prepends the address in the reported list for this owner
|
||||||
|
// This is only useful for establishing static hosts
|
||||||
|
func (r *RemoteList) unlockedPrependV4(ownerVpnIp iputil.VpnIp, to *Ip4AndPort) {
|
||||||
|
r.shouldRebuild = true
|
||||||
|
c := r.unlockedGetOrMakeV4(ownerVpnIp)
|
||||||
|
|
||||||
|
// We are doing the easy append because this is rarely called
|
||||||
|
c.reported = append([]*Ip4AndPort{to}, c.reported...)
|
||||||
|
if len(c.reported) > MaxRemotes {
|
||||||
|
c.reported = c.reported[:MaxRemotes]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedSetLearnedV6 assumes you have the write lock and sets the current learned address for this owner and marks the
|
||||||
|
// deduplicated address list as dirty
|
||||||
|
func (r *RemoteList) unlockedSetLearnedV6(ownerVpnIp iputil.VpnIp, to *Ip6AndPort) {
|
||||||
|
r.shouldRebuild = true
|
||||||
|
r.unlockedGetOrMakeV6(ownerVpnIp).learned = to
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedSetV6 assumes you have the write lock and resets the reported list of ips for this owner to the list provided
|
||||||
|
// and marks the deduplicated address list as dirty
|
||||||
|
func (r *RemoteList) unlockedSetV6(ownerVpnIp iputil.VpnIp, vpnIp iputil.VpnIp, to []*Ip6AndPort, check checkFuncV6) {
|
||||||
|
r.shouldRebuild = true
|
||||||
|
c := r.unlockedGetOrMakeV6(ownerVpnIp)
|
||||||
|
|
||||||
|
// Reset the slice
|
||||||
|
c.reported = c.reported[:0]
|
||||||
|
|
||||||
|
// We can't take their array but we can take their pointers
|
||||||
|
for _, v := range to[:minInt(len(to), MaxRemotes)] {
|
||||||
|
if check(vpnIp, v) {
|
||||||
|
c.reported = append(c.reported, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedPrependV6 assumes you have the write lock and prepends the address in the reported list for this owner
|
||||||
|
// This is only useful for establishing static hosts
|
||||||
|
func (r *RemoteList) unlockedPrependV6(ownerVpnIp iputil.VpnIp, to *Ip6AndPort) {
|
||||||
|
r.shouldRebuild = true
|
||||||
|
c := r.unlockedGetOrMakeV6(ownerVpnIp)
|
||||||
|
|
||||||
|
// We are doing the easy append because this is rarely called
|
||||||
|
c.reported = append([]*Ip6AndPort{to}, c.reported...)
|
||||||
|
if len(c.reported) > MaxRemotes {
|
||||||
|
c.reported = c.reported[:MaxRemotes]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedGetOrMakeV4 assumes you have the write lock and builds the cache and owner entry. Only the v4 pointer is established.
|
||||||
|
// The caller must dirty the learned address cache if required
|
||||||
|
func (r *RemoteList) unlockedGetOrMakeV4(ownerVpnIp iputil.VpnIp) *cacheV4 {
|
||||||
|
am := r.cache[ownerVpnIp]
|
||||||
|
if am == nil {
|
||||||
|
am = &cache{}
|
||||||
|
r.cache[ownerVpnIp] = am
|
||||||
|
}
|
||||||
|
// Avoid occupying memory for v6 addresses if we never have any
|
||||||
|
if am.v4 == nil {
|
||||||
|
am.v4 = &cacheV4{}
|
||||||
|
}
|
||||||
|
return am.v4
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedGetOrMakeV6 assumes you have the write lock and builds the cache and owner entry. Only the v6 pointer is established.
|
||||||
|
// The caller must dirty the learned address cache if required
|
||||||
|
func (r *RemoteList) unlockedGetOrMakeV6(ownerVpnIp iputil.VpnIp) *cacheV6 {
|
||||||
|
am := r.cache[ownerVpnIp]
|
||||||
|
if am == nil {
|
||||||
|
am = &cache{}
|
||||||
|
r.cache[ownerVpnIp] = am
|
||||||
|
}
|
||||||
|
// Avoid occupying memory for v4 addresses if we never have any
|
||||||
|
if am.v6 == nil {
|
||||||
|
am.v6 = &cacheV6{}
|
||||||
|
}
|
||||||
|
return am.v6
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedCollect assumes you have the write lock and collects/transforms the cache into the deduped address list.
|
||||||
|
// The result of this function can contain duplicates. unlockedSort handles cleaning it.
|
||||||
|
func (r *RemoteList) unlockedCollect() {
|
||||||
|
addrs := r.addrs[:0]
|
||||||
|
|
||||||
|
for _, c := range r.cache {
|
||||||
|
if c.v4 != nil {
|
||||||
|
if c.v4.learned != nil {
|
||||||
|
u := NewUDPAddrFromLH4(c.v4.learned)
|
||||||
|
if !r.unlockedIsBad(u) {
|
||||||
|
addrs = append(addrs, u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range c.v4.reported {
|
||||||
|
u := NewUDPAddrFromLH4(v)
|
||||||
|
if !r.unlockedIsBad(u) {
|
||||||
|
addrs = append(addrs, u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.v6 != nil {
|
||||||
|
if c.v6.learned != nil {
|
||||||
|
u := NewUDPAddrFromLH6(c.v6.learned)
|
||||||
|
if !r.unlockedIsBad(u) {
|
||||||
|
addrs = append(addrs, u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range c.v6.reported {
|
||||||
|
u := NewUDPAddrFromLH6(v)
|
||||||
|
if !r.unlockedIsBad(u) {
|
||||||
|
addrs = append(addrs, u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.addrs = addrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockedSort assumes you have the write lock and performs the deduping and sorting of the address list
func (r *RemoteList) unlockedSort(preferredRanges []*net.IPNet) {
	n := len(r.addrs)
	if n < 2 {
		// 0 or 1 entries are already sorted and deduplicated
		return
	}

	// Sort order: preferred ranges, then ipv6 before ipv4, then (v4 only) public
	// before private, then lexical ip, then port
	lessFunc := func(i, j int) bool {
		a := r.addrs[i]
		b := r.addrs[j]
		// Preferred addresses first

		aPref := isPreferred(a.IP, preferredRanges)
		bPref := isPreferred(b.IP, preferredRanges)
		switch {
		case aPref && !bPref:
			// If i is preferred and j is not, i is less than j
			return true

		case !aPref && bPref:
			// If j is preferred then i is not due to the else, i is not less than j
			return false

		default:
			// Both i an j are either preferred or not, sort within that
		}

		// ipv6 addresses 2nd
		a4 := a.IP.To4()
		b4 := b.IP.To4()
		switch {
		case a4 == nil && b4 != nil:
			// If i is v6 and j is v4, i is less than j
			return true

		case a4 != nil && b4 == nil:
			// If j is v6 and i is v4, i is not less than j
			return false

		case a4 != nil && b4 != nil:
			// Special case for ipv4, a4 and b4 are not nil
			aPrivate := isPrivateIP(a4)
			bPrivate := isPrivateIP(b4)
			switch {
			case !aPrivate && bPrivate:
				// If i is a public ip (not private) and j is a private ip, i is less then j
				return true

			case aPrivate && !bPrivate:
				// If j is public (not private) then i is private due to the else, i is not less than j
				return false

			default:
				// Both i an j are either public or private, sort within that
			}

		default:
			// Both i an j are either ipv4 or ipv6, sort within that
		}

		// lexical order of ips 3rd
		c := bytes.Compare(a.IP, b.IP)
		if c == 0 {
			// Ips are the same, Lexical order of ports 4th
			return a.Port < b.Port
		}

		// Ip wasn't the same
		return c < 0
	}

	// Sort it
	sort.Slice(r.addrs, lessFunc)

	// Deduplicate in place: a is the index of the last kept (unique) element,
	// b scans ahead; equal neighbors are skipped, unique ones are swapped down
	a, b := 0, 1
	for b < n {
		if !r.addrs[a].Equals(r.addrs[b]) {
			a++
			if a != b {
				r.addrs[a], r.addrs[b] = r.addrs[b], r.addrs[a]
			}
		}
		b++
	}

	// Truncate to the unique prefix
	r.addrs = r.addrs[:a+1]
	return
}
|
||||||
|
|
||||||
|
// minInt returns the minimum integer of a or b
func minInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||||
|
|
||||||
|
// isPreferred returns true of the ip is contained in the preferredRanges list
|
||||||
|
func isPreferred(ip net.IP, preferredRanges []*net.IPNet) bool {
|
||||||
|
//TODO: this would be better in a CIDR6Tree
|
||||||
|
for _, p := range preferredRanges {
|
||||||
|
if p.Contains(ip) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var _, private24BitBlock, _ = net.ParseCIDR("10.0.0.0/8")
|
||||||
|
var _, private20BitBlock, _ = net.ParseCIDR("172.16.0.0/12")
|
||||||
|
var _, private16BitBlock, _ = net.ParseCIDR("192.168.0.0/16")
|
||||||
|
|
||||||
|
// isPrivateIP returns true if the ip is contained by a rfc 1918 private range
|
||||||
|
func isPrivateIP(ip net.IP) bool {
|
||||||
|
//TODO: another great cidrtree option
|
||||||
|
//TODO: Private for ipv6 or just let it ride?
|
||||||
|
return private24BitBlock.Contains(ip) || private20BitBlock.Contains(ip) || private16BitBlock.Contains(ip)
|
||||||
|
}
|
||||||
235
remote_list_test.go
Normal file
235
remote_list_test.go
Normal file
@@ -0,0 +1,235 @@
|
|||||||
|
package nebula
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestRemoteList_Rebuild exercises collect + sort/dedupe across a mixed v4/v6
// cache, then verifies preferredRanges re-sorting without a recollect.
func TestRemoteList_Rebuild(t *testing.T) {
	rl := NewRemoteList()
	rl.unlockedSetV4(
		0,
		0,
		[]*Ip4AndPort{
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475}, // this is duped
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.0.182"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101},   // this is duped
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101},   // this is duped
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101},   // this is a dupe
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.19.0.1"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.31.0.1"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101},   // this is a dupe
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1476}, // almost dupe of 0 with a diff port
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475}, // this is a dupe
		},
		func(iputil.VpnIp, *Ip4AndPort) bool { return true },
	)

	rl.unlockedSetV6(
		1,
		1,
		[]*Ip6AndPort{
			NewIp6AndPort(net.ParseIP("1::1"), 1), // this is duped
			NewIp6AndPort(net.ParseIP("1::1"), 2), // almost dupe of 0 with a diff port, also gets duped
			NewIp6AndPort(net.ParseIP("1:100::1"), 1),
			NewIp6AndPort(net.ParseIP("1::1"), 1), // this is a dupe
			NewIp6AndPort(net.ParseIP("1::1"), 2), // this is a dupe
		},
		func(iputil.VpnIp, *Ip6AndPort) bool { return true },
	)

	// No preferred ranges: expect dedupe down to 10 entries
	rl.Rebuild([]*net.IPNet{})
	assert.Len(t, rl.addrs, 10, "addrs contains too many entries")

	// ipv6 first, sorted lexically within
	assert.Equal(t, "[1::1]:1", rl.addrs[0].String())
	assert.Equal(t, "[1::1]:2", rl.addrs[1].String())
	assert.Equal(t, "[1:100::1]:1", rl.addrs[2].String())

	// ipv4 last, sorted by public first, then private, lexically within them
	assert.Equal(t, "70.199.182.92:1475", rl.addrs[3].String())
	assert.Equal(t, "70.199.182.92:1476", rl.addrs[4].String())
	assert.Equal(t, "172.17.0.182:10101", rl.addrs[5].String())
	assert.Equal(t, "172.17.1.1:10101", rl.addrs[6].String())
	assert.Equal(t, "172.18.0.1:10101", rl.addrs[7].String())
	assert.Equal(t, "172.19.0.1:10101", rl.addrs[8].String())
	assert.Equal(t, "172.31.0.1:10101", rl.addrs[9].String())

	// Now ensure we can hoist ipv4 up
	_, ipNet, err := net.ParseCIDR("0.0.0.0/0")
	assert.NoError(t, err)
	rl.Rebuild([]*net.IPNet{ipNet})
	assert.Len(t, rl.addrs, 10, "addrs contains too many entries")

	// ipv4 first, public then private, lexically within them
	assert.Equal(t, "70.199.182.92:1475", rl.addrs[0].String())
	assert.Equal(t, "70.199.182.92:1476", rl.addrs[1].String())
	assert.Equal(t, "172.17.0.182:10101", rl.addrs[2].String())
	assert.Equal(t, "172.17.1.1:10101", rl.addrs[3].String())
	assert.Equal(t, "172.18.0.1:10101", rl.addrs[4].String())
	assert.Equal(t, "172.19.0.1:10101", rl.addrs[5].String())
	assert.Equal(t, "172.31.0.1:10101", rl.addrs[6].String())

	// ipv6 last, sorted by public first, then private, lexically within them
	assert.Equal(t, "[1::1]:1", rl.addrs[7].String())
	assert.Equal(t, "[1::1]:2", rl.addrs[8].String())
	assert.Equal(t, "[1:100::1]:1", rl.addrs[9].String())

	// Ensure we can hoist a specific ipv4 range over anything else
	_, ipNet, err = net.ParseCIDR("172.17.0.0/16")
	assert.NoError(t, err)
	rl.Rebuild([]*net.IPNet{ipNet})
	assert.Len(t, rl.addrs, 10, "addrs contains too many entries")

	// Preferred ipv4 first
	assert.Equal(t, "172.17.0.182:10101", rl.addrs[0].String())
	assert.Equal(t, "172.17.1.1:10101", rl.addrs[1].String())

	// ipv6 next
	assert.Equal(t, "[1::1]:1", rl.addrs[2].String())
	assert.Equal(t, "[1::1]:2", rl.addrs[3].String())
	assert.Equal(t, "[1:100::1]:1", rl.addrs[4].String())

	// the remaining ipv4 last
	assert.Equal(t, "70.199.182.92:1475", rl.addrs[5].String())
	assert.Equal(t, "70.199.182.92:1476", rl.addrs[6].String())
	assert.Equal(t, "172.18.0.1:10101", rl.addrs[7].String())
	assert.Equal(t, "172.19.0.1:10101", rl.addrs[8].String())
	assert.Equal(t, "172.31.0.1:10101", rl.addrs[9].String())
}
|
||||||
|
|
||||||
|
// BenchmarkFullRebuild measures collect + sort: shouldRebuild is reset each
// iteration so Rebuild performs the full recollect and dedupe every time.
func BenchmarkFullRebuild(b *testing.B) {
	rl := NewRemoteList()
	rl.unlockedSetV4(
		0,
		0,
		[]*Ip4AndPort{
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.0.182"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.19.0.1"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.31.0.1"))), Port: 10101},
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101},   // this is a dupe
			{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1476}, // dupe of 0 with a diff port
		},
		func(iputil.VpnIp, *Ip4AndPort) bool { return true },
	)

	rl.unlockedSetV6(
		0,
		0,
		[]*Ip6AndPort{
			NewIp6AndPort(net.ParseIP("1::1"), 1),
			NewIp6AndPort(net.ParseIP("1::1"), 2), // dupe of 0 with a diff port
			NewIp6AndPort(net.ParseIP("1:100::1"), 1),
			NewIp6AndPort(net.ParseIP("1::1"), 1), // this is a dupe
		},
		func(iputil.VpnIp, *Ip6AndPort) bool { return true },
	)

	b.Run("no preferred", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			rl.shouldRebuild = true
			rl.Rebuild([]*net.IPNet{})
		}
	})

	_, ipNet, err := net.ParseCIDR("172.17.0.0/16")
	assert.NoError(b, err)
	b.Run("1 preferred", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			rl.shouldRebuild = true
			rl.Rebuild([]*net.IPNet{ipNet})
		}
	})

	_, ipNet2, err := net.ParseCIDR("70.0.0.0/8")
	assert.NoError(b, err)
	b.Run("2 preferred", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			rl.shouldRebuild = true
			rl.Rebuild([]*net.IPNet{ipNet, ipNet2})
		}
	})

	_, ipNet3, err := net.ParseCIDR("0.0.0.0/0")
	assert.NoError(b, err)
	b.Run("3 preferred", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			rl.shouldRebuild = true
			rl.Rebuild([]*net.IPNet{ipNet, ipNet2, ipNet3})
		}
	})
}
|
||||||
|
|
||||||
|
func BenchmarkSortRebuild(b *testing.B) {
|
||||||
|
rl := NewRemoteList()
|
||||||
|
rl.unlockedSetV4(
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
[]*Ip4AndPort{
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475},
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.0.182"))), Port: 10101},
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101},
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101},
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.19.0.1"))), Port: 10101},
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.31.0.1"))), Port: 10101},
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, // this is a dupe
|
||||||
|
{Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1476}, // dupe of 0 with a diff port
|
||||||
|
},
|
||||||
|
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
|
||||||
|
)
|
||||||
|
|
||||||
|
rl.unlockedSetV6(
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
[]*Ip6AndPort{
|
||||||
|
NewIp6AndPort(net.ParseIP("1::1"), 1),
|
||||||
|
NewIp6AndPort(net.ParseIP("1::1"), 2), // dupe of 0 with a diff port
|
||||||
|
NewIp6AndPort(net.ParseIP("1:100::1"), 1),
|
||||||
|
NewIp6AndPort(net.ParseIP("1::1"), 1), // this is a dupe
|
||||||
|
},
|
||||||
|
func(iputil.VpnIp, *Ip6AndPort) bool { return true },
|
||||||
|
)
|
||||||
|
|
||||||
|
b.Run("no preferred", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
rl.shouldRebuild = true
|
||||||
|
rl.Rebuild([]*net.IPNet{})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
_, ipNet, err := net.ParseCIDR("172.17.0.0/16")
|
||||||
|
rl.Rebuild([]*net.IPNet{ipNet})
|
||||||
|
|
||||||
|
assert.NoError(b, err)
|
||||||
|
b.Run("1 preferred", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
rl.Rebuild([]*net.IPNet{ipNet})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
_, ipNet2, err := net.ParseCIDR("70.0.0.0/8")
|
||||||
|
rl.Rebuild([]*net.IPNet{ipNet, ipNet2})
|
||||||
|
|
||||||
|
assert.NoError(b, err)
|
||||||
|
b.Run("2 preferred", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
rl.Rebuild([]*net.IPNet{ipNet, ipNet2})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
_, ipNet3, err := net.ParseCIDR("0.0.0.0/0")
|
||||||
|
rl.Rebuild([]*net.IPNet{ipNet, ipNet2, ipNet3})
|
||||||
|
|
||||||
|
assert.NoError(b, err)
|
||||||
|
b.Run("3 preferred", func(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
rl.Rebuild([]*net.IPNet{ipNet, ipNet2, ipNet3})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
244
ssh.go
244
ssh.go
@@ -5,15 +5,21 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/slackhq/nebula/sshd"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime/pprof"
|
"runtime/pprof"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
|
"github.com/slackhq/nebula/sshd"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
)
|
)
|
||||||
|
|
||||||
type sshListHostMapFlags struct {
|
type sshListHostMapFlags struct {
|
||||||
@@ -24,6 +30,7 @@ type sshListHostMapFlags struct {
|
|||||||
type sshPrintCertFlags struct {
|
type sshPrintCertFlags struct {
|
||||||
Json bool
|
Json bool
|
||||||
Pretty bool
|
Pretty bool
|
||||||
|
Raw bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type sshPrintTunnelFlags struct {
|
type sshPrintTunnelFlags struct {
|
||||||
@@ -42,50 +49,58 @@ type sshCreateTunnelFlags struct {
|
|||||||
Address string
|
Address string
|
||||||
}
|
}
|
||||||
|
|
||||||
func wireSSHReload(ssh *sshd.SSHServer, c *Config) {
|
func wireSSHReload(l *logrus.Logger, ssh *sshd.SSHServer, c *config.C) {
|
||||||
c.RegisterReloadCallback(func(c *Config) {
|
c.RegisterReloadCallback(func(c *config.C) {
|
||||||
if c.GetBool("sshd.enabled", false) {
|
if c.GetBool("sshd.enabled", false) {
|
||||||
err := configSSH(ssh, c)
|
sshRun, err := configSSH(l, ssh, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithError(err).Error("Failed to reconfigure the sshd")
|
l.WithError(err).Error("Failed to reconfigure the sshd")
|
||||||
ssh.Stop()
|
ssh.Stop()
|
||||||
}
|
}
|
||||||
|
if sshRun != nil {
|
||||||
|
go sshRun()
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
ssh.Stop()
|
ssh.Stop()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func configSSH(ssh *sshd.SSHServer, c *Config) error {
|
// configSSH reads the ssh info out of the passed-in Config and
|
||||||
|
// updates the passed-in SSHServer. On success, it returns a function
|
||||||
|
// that callers may invoke to run the configured ssh server. On
|
||||||
|
// failure, it returns nil, error.
|
||||||
|
func configSSH(l *logrus.Logger, ssh *sshd.SSHServer, c *config.C) (func(), error) {
|
||||||
//TODO conntrack list
|
//TODO conntrack list
|
||||||
//TODO print firewall rules or hash?
|
//TODO print firewall rules or hash?
|
||||||
|
|
||||||
listen := c.GetString("sshd.listen", "")
|
listen := c.GetString("sshd.listen", "")
|
||||||
if listen == "" {
|
if listen == "" {
|
||||||
return fmt.Errorf("sshd.listen must be provided")
|
return nil, fmt.Errorf("sshd.listen must be provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
port := strings.Split(listen, ":")
|
_, port, err := net.SplitHostPort(listen)
|
||||||
if len(port) < 2 {
|
if err != nil {
|
||||||
return fmt.Errorf("sshd.listen does not have a port")
|
return nil, fmt.Errorf("invalid sshd.listen address: %s", err)
|
||||||
} else if port[1] == "22" {
|
}
|
||||||
return fmt.Errorf("sshd.listen can not use port 22")
|
if port == "22" {
|
||||||
|
return nil, fmt.Errorf("sshd.listen can not use port 22")
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: no good way to reload this right now
|
//TODO: no good way to reload this right now
|
||||||
hostKeyFile := c.GetString("sshd.host_key", "")
|
hostKeyFile := c.GetString("sshd.host_key", "")
|
||||||
if hostKeyFile == "" {
|
if hostKeyFile == "" {
|
||||||
return fmt.Errorf("sshd.host_key must be provided")
|
return nil, fmt.Errorf("sshd.host_key must be provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
hostKeyBytes, err := ioutil.ReadFile(hostKeyFile)
|
hostKeyBytes, err := ioutil.ReadFile(hostKeyFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while loading sshd.host_key file: %s", err)
|
return nil, fmt.Errorf("error while loading sshd.host_key file: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ssh.SetHostKey(hostKeyBytes)
|
err = ssh.SetHostKey(hostKeyBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while adding sshd.host_key: %s", err)
|
return nil, fmt.Errorf("error while adding sshd.host_key: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rawKeys := c.Get("sshd.authorized_users")
|
rawKeys := c.Get("sshd.authorized_users")
|
||||||
@@ -136,17 +151,22 @@ func configSSH(ssh *sshd.SSHServer, c *Config) error {
|
|||||||
l.Info("no ssh users to authorize")
|
l.Info("no ssh users to authorize")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var runner func()
|
||||||
if c.GetBool("sshd.enabled", false) {
|
if c.GetBool("sshd.enabled", false) {
|
||||||
ssh.Stop()
|
ssh.Stop()
|
||||||
go ssh.Run(listen)
|
runner = func() {
|
||||||
|
if err := ssh.Run(listen); err != nil {
|
||||||
|
l.WithField("err", err).Warn("Failed to run the SSH server")
|
||||||
|
}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
ssh.Stop()
|
ssh.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return runner, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func attachCommands(ssh *sshd.SSHServer, hostMap *HostMap, pendingHostMap *HostMap, lightHouse *LightHouse, ifce *Interface) {
|
func attachCommands(l *logrus.Logger, ssh *sshd.SSHServer, hostMap *HostMap, pendingHostMap *HostMap, lightHouse *LightHouse, ifce *Interface) {
|
||||||
ssh.RegisterCommand(&sshd.Command{
|
ssh.RegisterCommand(&sshd.Command{
|
||||||
Name: "list-hostmap",
|
Name: "list-hostmap",
|
||||||
ShortDescription: "List all known previously connected hosts",
|
ShortDescription: "List all known previously connected hosts",
|
||||||
@@ -222,13 +242,17 @@ func attachCommands(ssh *sshd.SSHServer, hostMap *HostMap, pendingHostMap *HostM
|
|||||||
ssh.RegisterCommand(&sshd.Command{
|
ssh.RegisterCommand(&sshd.Command{
|
||||||
Name: "log-level",
|
Name: "log-level",
|
||||||
ShortDescription: "Gets or sets the current log level",
|
ShortDescription: "Gets or sets the current log level",
|
||||||
Callback: sshLogLevel,
|
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
|
return sshLogLevel(l, fs, a, w)
|
||||||
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
ssh.RegisterCommand(&sshd.Command{
|
ssh.RegisterCommand(&sshd.Command{
|
||||||
Name: "log-format",
|
Name: "log-format",
|
||||||
ShortDescription: "Gets or sets the current log format",
|
ShortDescription: "Gets or sets the current log format",
|
||||||
Callback: sshLogFormat,
|
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
|
return sshLogFormat(l, fs, a, w)
|
||||||
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
ssh.RegisterCommand(&sshd.Command{
|
ssh.RegisterCommand(&sshd.Command{
|
||||||
@@ -247,6 +271,7 @@ func attachCommands(ssh *sshd.SSHServer, hostMap *HostMap, pendingHostMap *HostM
|
|||||||
s := sshPrintCertFlags{}
|
s := sshPrintCertFlags{}
|
||||||
fl.BoolVar(&s.Json, "json", false, "outputs as json")
|
fl.BoolVar(&s.Json, "json", false, "outputs as json")
|
||||||
fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json")
|
fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json")
|
||||||
|
fl.BoolVar(&s.Raw, "raw", false, "raw prints the PEM encoded certificate, not compatible with -json or -pretty")
|
||||||
return fl, &s
|
return fl, &s
|
||||||
},
|
},
|
||||||
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
|
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
@@ -328,8 +353,10 @@ func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
hostMap.RLock()
|
hm := listHostMap(hostMap)
|
||||||
defer hostMap.RUnlock()
|
sort.Slice(hm, func(i, j int) bool {
|
||||||
|
return bytes.Compare(hm[i].VpnIp, hm[j].VpnIp) < 0
|
||||||
|
})
|
||||||
|
|
||||||
if fs.Json || fs.Pretty {
|
if fs.Json || fs.Pretty {
|
||||||
js := json.NewEncoder(w.GetWriter())
|
js := json.NewEncoder(w.GetWriter())
|
||||||
@@ -337,35 +364,15 @@ func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error
|
|||||||
js.SetIndent("", " ")
|
js.SetIndent("", " ")
|
||||||
}
|
}
|
||||||
|
|
||||||
d := make([]m, len(hostMap.Hosts))
|
err := js.Encode(hm)
|
||||||
x := 0
|
|
||||||
var h m
|
|
||||||
for _, v := range hostMap.Hosts {
|
|
||||||
h = m{
|
|
||||||
"vpnIp": int2ip(v.hostId),
|
|
||||||
"localIndex": v.localIndexId,
|
|
||||||
"remoteIndex": v.remoteIndexId,
|
|
||||||
"remoteAddrs": v.RemoteUDPAddrs(),
|
|
||||||
"cachedPackets": len(v.packetStore),
|
|
||||||
"cert": v.GetCert(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.ConnectionState != nil {
|
|
||||||
h["messageCounter"] = v.ConnectionState.messageCounter
|
|
||||||
}
|
|
||||||
|
|
||||||
d[x] = h
|
|
||||||
x++
|
|
||||||
}
|
|
||||||
|
|
||||||
err := js.Encode(d)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//TODO
|
//TODO
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
for i, v := range hostMap.Hosts {
|
for _, v := range hm {
|
||||||
err := w.WriteLine(fmt.Sprintf("%s: %s", int2ip(i), v.RemoteUDPAddrs()))
|
err := w.WriteLine(fmt.Sprintf("%s: %s", v.VpnIp, v.RemoteAddrs))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -382,8 +389,26 @@ func sshListLighthouseMap(lightHouse *LightHouse, a interface{}, w sshd.StringWr
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type lighthouseInfo struct {
|
||||||
|
VpnIp string `json:"vpnIp"`
|
||||||
|
Addrs *CacheMap `json:"addrs"`
|
||||||
|
}
|
||||||
|
|
||||||
lightHouse.RLock()
|
lightHouse.RLock()
|
||||||
defer lightHouse.RUnlock()
|
addrMap := make([]lighthouseInfo, len(lightHouse.addrMap))
|
||||||
|
x := 0
|
||||||
|
for k, v := range lightHouse.addrMap {
|
||||||
|
addrMap[x] = lighthouseInfo{
|
||||||
|
VpnIp: k.String(),
|
||||||
|
Addrs: v.CopyCache(),
|
||||||
|
}
|
||||||
|
x++
|
||||||
|
}
|
||||||
|
lightHouse.RUnlock()
|
||||||
|
|
||||||
|
sort.Slice(addrMap, func(i, j int) bool {
|
||||||
|
return strings.Compare(addrMap[i].VpnIp, addrMap[j].VpnIp) < 0
|
||||||
|
})
|
||||||
|
|
||||||
if fs.Json || fs.Pretty {
|
if fs.Json || fs.Pretty {
|
||||||
js := json.NewEncoder(w.GetWriter())
|
js := json.NewEncoder(w.GetWriter())
|
||||||
@@ -391,36 +416,19 @@ func sshListLighthouseMap(lightHouse *LightHouse, a interface{}, w sshd.StringWr
|
|||||||
js.SetIndent("", " ")
|
js.SetIndent("", " ")
|
||||||
}
|
}
|
||||||
|
|
||||||
d := make([]m, len(lightHouse.addrMap))
|
err := js.Encode(addrMap)
|
||||||
x := 0
|
|
||||||
var h m
|
|
||||||
for vpnIp, v := range lightHouse.addrMap {
|
|
||||||
ips := make([]string, len(v))
|
|
||||||
for i, ip := range v {
|
|
||||||
ips[i] = ip.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
h = m{
|
|
||||||
"vpnIp": int2ip(vpnIp),
|
|
||||||
"addrs": ips,
|
|
||||||
}
|
|
||||||
|
|
||||||
d[x] = h
|
|
||||||
x++
|
|
||||||
}
|
|
||||||
|
|
||||||
err := js.Encode(d)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//TODO
|
//TODO
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
for vpnIp, v := range lightHouse.addrMap {
|
for _, v := range addrMap {
|
||||||
ips := make([]string, len(v))
|
b, err := json.Marshal(v.Addrs)
|
||||||
for i, ip := range v {
|
if err != nil {
|
||||||
ips[i] = ip.String()
|
return err
|
||||||
}
|
}
|
||||||
err := w.WriteLine(fmt.Sprintf("%s: %s", int2ip(vpnIp), ips))
|
err = w.WriteLine(fmt.Sprintf("%s: %s", v.VpnIp, string(b)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -461,13 +469,22 @@ func sshQueryLighthouse(ifce *Interface, fs interface{}, a []string, w sshd.Stri
|
|||||||
return w.WriteLine("No vpn ip was provided")
|
return w.WriteLine("No vpn ip was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ip2int(net.ParseIP(a[0]))
|
parsedIp := net.ParseIP(a[0])
|
||||||
|
if parsedIp == nil {
|
||||||
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(parsedIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
ips, _ := ifce.lightHouse.Query(vpnIp, ifce)
|
var cm *CacheMap
|
||||||
return json.NewEncoder(w.GetWriter()).Encode(ips)
|
rl := ifce.lightHouse.Query(vpnIp, ifce)
|
||||||
|
if rl != nil {
|
||||||
|
cm = rl.CopyCache()
|
||||||
|
}
|
||||||
|
return json.NewEncoder(w.GetWriter()).Encode(cm)
|
||||||
}
|
}
|
||||||
|
|
||||||
func sshCloseTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error {
|
func sshCloseTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
@@ -481,19 +498,24 @@ func sshCloseTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
|
|||||||
return w.WriteLine("No vpn ip was provided")
|
return w.WriteLine("No vpn ip was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ip2int(net.ParseIP(a[0]))
|
parsedIp := net.ParseIP(a[0])
|
||||||
|
if parsedIp == nil {
|
||||||
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(parsedIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo, err := ifce.hostMap.QueryVpnIP(uint32(vpnIp))
|
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !flags.LocalOnly {
|
if !flags.LocalOnly {
|
||||||
ifce.send(
|
ifce.send(
|
||||||
closeTunnel,
|
header.CloseTunnel,
|
||||||
0,
|
0,
|
||||||
hostInfo.ConnectionState,
|
hostInfo.ConnectionState,
|
||||||
hostInfo,
|
hostInfo,
|
||||||
@@ -504,7 +526,7 @@ func sshCloseTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
ifce.closeTunnel(hostInfo)
|
ifce.closeTunnel(hostInfo, false)
|
||||||
return w.WriteLine("Closed")
|
return w.WriteLine("Closed")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -519,32 +541,37 @@ func sshCreateTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringW
|
|||||||
return w.WriteLine("No vpn ip was provided")
|
return w.WriteLine("No vpn ip was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ip2int(net.ParseIP(a[0]))
|
parsedIp := net.ParseIP(a[0])
|
||||||
|
if parsedIp == nil {
|
||||||
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(parsedIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo, _ := ifce.hostMap.QueryVpnIP(uint32(vpnIp))
|
hostInfo, _ := ifce.hostMap.QueryVpnIp(vpnIp)
|
||||||
if hostInfo != nil {
|
if hostInfo != nil {
|
||||||
return w.WriteLine(fmt.Sprintf("Tunnel already exists"))
|
return w.WriteLine(fmt.Sprintf("Tunnel already exists"))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo, _ = ifce.handshakeManager.pendingHostMap.QueryVpnIP(uint32(vpnIp))
|
hostInfo, _ = ifce.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
|
||||||
if hostInfo != nil {
|
if hostInfo != nil {
|
||||||
return w.WriteLine(fmt.Sprintf("Tunnel already handshaking"))
|
return w.WriteLine(fmt.Sprintf("Tunnel already handshaking"))
|
||||||
}
|
}
|
||||||
|
|
||||||
var addr *udpAddr
|
var addr *udp.Addr
|
||||||
if flags.Address != "" {
|
if flags.Address != "" {
|
||||||
addr = NewUDPAddrFromString(flags.Address)
|
addr = udp.NewAddrFromString(flags.Address)
|
||||||
if addr == nil {
|
if addr == nil {
|
||||||
return w.WriteLine("Address could not be parsed")
|
return w.WriteLine("Address could not be parsed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo = ifce.handshakeManager.AddVpnIP(vpnIp)
|
hostInfo = ifce.handshakeManager.AddVpnIp(vpnIp)
|
||||||
if addr != nil {
|
if addr != nil {
|
||||||
hostInfo.SetRemote(*addr)
|
hostInfo.SetRemote(addr)
|
||||||
}
|
}
|
||||||
ifce.getOrHandshake(vpnIp)
|
ifce.getOrHandshake(vpnIp)
|
||||||
|
|
||||||
@@ -566,22 +593,27 @@ func sshChangeRemote(ifce *Interface, fs interface{}, a []string, w sshd.StringW
|
|||||||
return w.WriteLine("No address was provided")
|
return w.WriteLine("No address was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
addr := NewUDPAddrFromString(flags.Address)
|
addr := udp.NewAddrFromString(flags.Address)
|
||||||
if addr == nil {
|
if addr == nil {
|
||||||
return w.WriteLine("Address could not be parsed")
|
return w.WriteLine("Address could not be parsed")
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ip2int(net.ParseIP(a[0]))
|
parsedIp := net.ParseIP(a[0])
|
||||||
|
if parsedIp == nil {
|
||||||
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(parsedIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo, err := ifce.hostMap.QueryVpnIP(uint32(vpnIp))
|
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo.SetRemote(*addr)
|
hostInfo.SetRemote(addr)
|
||||||
return w.WriteLine("Changed")
|
return w.WriteLine("Changed")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -606,7 +638,7 @@ func sshGetHeapProfile(fs interface{}, a []string, w sshd.StringWriter) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func sshLogLevel(fs interface{}, a []string, w sshd.StringWriter) error {
|
func sshLogLevel(l *logrus.Logger, fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
if len(a) == 0 {
|
if len(a) == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level))
|
return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level))
|
||||||
}
|
}
|
||||||
@@ -620,7 +652,7 @@ func sshLogLevel(fs interface{}, a []string, w sshd.StringWriter) error {
|
|||||||
return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level))
|
return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level))
|
||||||
}
|
}
|
||||||
|
|
||||||
func sshLogFormat(fs interface{}, a []string, w sshd.StringWriter) error {
|
func sshLogFormat(l *logrus.Logger, fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
if len(a) == 0 {
|
if len(a) == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("Log format is: %s", reflect.TypeOf(l.Formatter)))
|
return w.WriteLine(fmt.Sprintf("Log format is: %s", reflect.TypeOf(l.Formatter)))
|
||||||
}
|
}
|
||||||
@@ -647,12 +679,17 @@ func sshPrintCert(ifce *Interface, fs interface{}, a []string, w sshd.StringWrit
|
|||||||
|
|
||||||
cert := ifce.certState.certificate
|
cert := ifce.certState.certificate
|
||||||
if len(a) > 0 {
|
if len(a) > 0 {
|
||||||
vpnIp := ip2int(net.ParseIP(a[0]))
|
parsedIp := net.ParseIP(a[0])
|
||||||
|
if parsedIp == nil {
|
||||||
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(parsedIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo, err := ifce.hostMap.QueryVpnIP(uint32(vpnIp))
|
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
||||||
}
|
}
|
||||||
@@ -680,6 +717,16 @@ func sshPrintCert(ifce *Interface, fs interface{}, a []string, w sshd.StringWrit
|
|||||||
return w.WriteBytes(b)
|
return w.WriteBytes(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if args.Raw {
|
||||||
|
b, err := cert.MarshalToPEM()
|
||||||
|
if err != nil {
|
||||||
|
//TODO: handle it
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.WriteBytes(b)
|
||||||
|
}
|
||||||
|
|
||||||
return w.WriteLine(cert.String())
|
return w.WriteLine(cert.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -694,12 +741,17 @@ func sshPrintTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
|
|||||||
return w.WriteLine("No vpn ip was provided")
|
return w.WriteLine("No vpn ip was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
vpnIp := ip2int(net.ParseIP(a[0]))
|
parsedIp := net.ParseIP(a[0])
|
||||||
|
if parsedIp == nil {
|
||||||
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(parsedIp)
|
||||||
if vpnIp == 0 {
|
if vpnIp == 0 {
|
||||||
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
hostInfo, err := ifce.hostMap.QueryVpnIP(uint32(vpnIp))
|
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
|
||||||
}
|
}
|
||||||
@@ -709,7 +761,7 @@ func sshPrintTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
|
|||||||
enc.SetIndent("", " ")
|
enc.SetIndent("", " ")
|
||||||
}
|
}
|
||||||
|
|
||||||
return enc.Encode(hostInfo)
|
return enc.Encode(copyHostInfo(hostInfo, ifce.hostMap.preferredRanges))
|
||||||
}
|
}
|
||||||
|
|
||||||
func sshReload(fs interface{}, a []string, w sshd.StringWriter) error {
|
func sshReload(fs interface{}, a []string, w sshd.StringWriter) error {
|
||||||
|
|||||||
@@ -4,9 +4,10 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/armon/go-radix"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/armon/go-radix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CommandFlags is a function called before help or command execution to parse command line flags
|
// CommandFlags is a function called before help or command execution to parse command line flags
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user