Mirror of https://github.com/slackhq/nebula.git (synced 2025-11-22 16:34:25 +01:00)

Compare commits: one-nine-b ... prometheus — 1 commit (SHA1 06372e12f1)

Diff hunks below are shown old ('-') to new ('+').

.github/workflows/gofmt.yml (vendored, 2 changed lines)

@@ -18,7 +18,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Install goimports

.github/workflows/release.yml (vendored, 64 changed lines)

@@ -14,7 +14,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Build

@@ -24,7 +24,7 @@ jobs:
           mv build/*.tar.gz release

       - name: Upload artifacts
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v3
         with:
           name: linux-latest
           path: release

@@ -37,7 +37,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Build

@@ -55,7 +55,7 @@ jobs:
           mv dist\windows\wintun build\dist\windows\

       - name: Upload artifacts
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v3
         with:
           name: windows-latest
           path: build

@@ -64,18 +64,18 @@ jobs:
     name: Build Universal Darwin
     env:
       HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
-    runs-on: macos-latest
+    runs-on: macos-11
     steps:
       - uses: actions/checkout@v4

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Import certificates
        if: env.HAS_SIGNING_CREDS == 'true'
-        uses: Apple-Actions/import-codesign-certs@v3
+        uses: Apple-Actions/import-codesign-certs@v2
        with:
          p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
          p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}

@@ -104,57 +104,11 @@ jobs:
           fi

       - name: Upload artifacts
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v3
         with:
           name: darwin-latest
           path: ./release/*

-  build-docker:
-    name: Create and Upload Docker Images
-    # Technically we only need build-linux to succeed, but if any platforms fail we'll
-    # want to investigate and restart the build
-    needs: [build-linux, build-darwin, build-windows]
-    runs-on: ubuntu-latest
-    env:
-      HAS_DOCKER_CREDS: ${{ vars.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
-    # XXX It's not possible to write a conditional here, so instead we do it on every step
-    #if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
-    steps:
-      # Be sure to checkout the code before downloading artifacts, or they will
-      # be overwritten
-      - name: Checkout code
-        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
-        uses: actions/checkout@v4
-
-      - name: Download artifacts
-        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
-        uses: actions/download-artifact@v4
-        with:
-          name: linux-latest
-          path: artifacts
-
-      - name: Login to Docker Hub
-        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
-        uses: docker/login-action@v3
-        with:
-          username: ${{ vars.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
-      - name: Set up Docker Buildx
-        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
-        uses: docker/setup-buildx-action@v3
-
-      - name: Build and push images
-        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
-        env:
-          DOCKER_IMAGE_REPO: ${{ vars.DOCKER_IMAGE_REPO || 'nebulaoss/nebula' }}
-          DOCKER_IMAGE_TAG: ${{ vars.DOCKER_IMAGE_TAG || 'latest' }}
-        run: |
-          mkdir -p build/linux-{amd64,arm64}
-          tar -zxvf artifacts/nebula-linux-amd64.tar.gz -C build/linux-amd64/
-          tar -zxvf artifacts/nebula-linux-arm64.tar.gz -C build/linux-arm64/
-          docker buildx build . --push -f docker/Dockerfile --platform linux/amd64,linux/arm64 --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:${GITHUB_REF#refs/tags/v}"
-
   release:
     name: Create and Upload Release
     needs: [build-linux, build-darwin, build-windows]

@@ -163,7 +117,7 @@ jobs:
       - uses: actions/checkout@v4

       - name: Download artifacts
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v3
        with:
          path: artifacts

.github/workflows/smoke-extra.yml (vendored, 48 changed lines — file removed). Former contents:

name: smoke-extra
on:
  push:
    branches:
      - master
  pull_request:
    types: [opened, synchronize, labeled, reopened]
    paths:
      - '.github/workflows/smoke**'
      - '**Makefile'
      - '**.go'
      - '**.proto'
      - 'go.mod'
      - 'go.sum'
jobs:

  smoke-extra:
    if: github.ref == 'refs/heads/master' || contains(github.event.pull_request.labels.*.name, 'smoke-test-extra')
    name: Run extra smoke tests
    runs-on: ubuntu-latest
    steps:

    - uses: actions/checkout@v4

    - uses: actions/setup-go@v5
      with:
        go-version-file: 'go.mod'
        check-latest: true

    - name: install vagrant
      run: sudo apt-get update && sudo apt-get install -y vagrant virtualbox

    - name: freebsd-amd64
      run: make smoke-vagrant/freebsd-amd64

    - name: openbsd-amd64
      run: make smoke-vagrant/openbsd-amd64

    - name: netbsd-amd64
      run: make smoke-vagrant/netbsd-amd64

    - name: linux-386
      run: make smoke-vagrant/linux-386

    - name: linux-amd64-ipv6disable
      run: make smoke-vagrant/linux-amd64-ipv6disable

    timeout-minutes: 30

.github/workflows/smoke.yml (vendored, 2 changed lines)

@@ -22,7 +22,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: build

.github/workflows/smoke/build.sh (vendored, 5 changed lines)

@@ -11,11 +11,6 @@ mkdir ./build
 cp ../../../../build/linux-amd64/nebula .
 cp ../../../../build/linux-amd64/nebula-cert .

-if [ "$1" ]
-then
-    cp "../../../../build/$1/nebula" "$1-nebula"
-fi
-
 HOST="lighthouse1" \
 AM_LIGHTHOUSE=true \
 ../genconfig.sh >lighthouse1.yml

.github/workflows/smoke/genconfig.sh (vendored, 2 changed lines)

@@ -47,7 +47,7 @@ listen:
   port: ${LISTEN_PORT:-4242}

 tun:
-  dev: ${TUN_DEV:-tun0}
+  dev: ${TUN_DEV:-nebula1}

 firewall:
   inbound_action: reject

.github/workflows/smoke/smoke-relay.sh (vendored, 2 changed lines)

@@ -76,7 +76,7 @@ docker exec host4 sh -c 'kill 1'
 docker exec host3 sh -c 'kill 1'
 docker exec host2 sh -c 'kill 1'
 docker exec lighthouse1 sh -c 'kill 1'
-sleep 5
+sleep 1

 if [ "$(jobs -r)" ]
 then

.github/workflows/smoke/smoke-vagrant.sh (vendored, 105 changed lines — file removed). Former contents:

#!/bin/bash

set -e -x

set -o pipefail

export VAGRANT_CWD="$PWD/vagrant-$1"

mkdir -p logs

cleanup() {
    echo
    echo " *** cleanup"
    echo

    set +e
    if [ "$(jobs -r)" ]
    then
        docker kill lighthouse1 host2
    fi
    vagrant destroy -f
}

trap cleanup EXIT

CONTAINER="nebula:${NAME:-smoke}"

docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
docker run --name host2 --rm "$CONTAINER" -config host2.yml -test

vagrant up
vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test"

docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" &
sleep 15

# grab tcpdump pcaps for debugging
docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
# vagrant ssh -c "tcpdump -i nebula1 -q -w - -U" 2>logs/host3.inside.log >logs/host3.inside.pcap &
# vagrant ssh -c "tcpdump -i eth0 -q -w - -U" 2>logs/host3.outside.log >logs/host3.outside.pcap &

docker exec host2 ncat -nklv 0.0.0.0 2000 &
vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
#docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
#vagrant ssh -c "ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000" &

set +x
echo
echo " *** Testing ping from lighthouse1"
echo
set -x
docker exec lighthouse1 ping -c1 192.168.100.2
docker exec lighthouse1 ping -c1 192.168.100.3

set +x
echo
echo " *** Testing ping from host2"
echo
set -x
docker exec host2 ping -c1 192.168.100.1
# Should fail because not allowed by host3 inbound firewall
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1

set +x
echo
echo " *** Testing ncat from host2"
echo
set -x
# Should fail because not allowed by host3 inbound firewall
#! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
#! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1

set +x
echo
echo " *** Testing ping from host3"
echo
set -x
vagrant ssh -c "ping -c1 192.168.100.1"
vagrant ssh -c "ping -c1 192.168.100.2"

set +x
echo
echo " *** Testing ncat from host3"
echo
set -x
#vagrant ssh -c "ncat -nzv -w5 192.168.100.2 2000"
#vagrant ssh -c "ncat -nzuv -w5 192.168.100.2 3000" | grep -q host2

vagrant ssh -c "sudo xargs kill </nebula/pid"
docker exec host2 sh -c 'kill 1'
docker exec lighthouse1 sh -c 'kill 1'
sleep 1

if [ "$(jobs -r)" ]
then
    echo "nebula still running after SIGTERM sent" >&2
    exit 1
fi

.github/workflows/smoke/smoke.sh (vendored, 2 changed lines)

@@ -129,7 +129,7 @@ docker exec host4 sh -c 'kill 1'
 docker exec host3 sh -c 'kill 1'
 docker exec host2 sh -c 'kill 1'
 docker exec lighthouse1 sh -c 'kill 1'
-sleep 5
+sleep 1

 if [ "$(jobs -r)" ]
 then

Five Vagrantfiles removed (file paths not captured in this view). Former contents:

@@ -1,7 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
  config.vm.box = "generic/freebsd14"

  config.vm.synced_folder "../build", "/nebula", type: "rsync"
end

@@ -1,7 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/xenial32"

  config.vm.synced_folder "../build", "/nebula"
end

@@ -1,16 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/jammy64"

  config.vm.synced_folder "../build", "/nebula"

  config.vm.provision :shell do |shell|
    shell.inline = <<-EOF
      sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="ipv6.disable=1"/' /etc/default/grub
      update-grub
    EOF
    shell.privileged = true
    shell.reboot = true
  end
end

@@ -1,7 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
  config.vm.box = "generic/netbsd9"

  config.vm.synced_folder "../build", "/nebula", type: "rsync"
end

@@ -1,7 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
  config.vm.box = "generic/openbsd7"

  config.vm.synced_folder "../build", "/nebula", type: "rsync"
end

.github/workflows/test.yml (vendored, 20 changed lines)

@@ -22,7 +22,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Build

@@ -40,10 +40,10 @@
       - name: Build test mobile
         run: make build-test-mobile

-      - uses: actions/upload-artifact@v4
+      - uses: actions/upload-artifact@v3
         with:
-          name: e2e packet flow linux-latest
-          path: e2e/mermaid/linux-latest
+          name: e2e packet flow
+          path: e2e/mermaid/
           if-no-files-found: warn

   test-linux-boringcrypto:

@@ -55,7 +55,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Build

@@ -72,14 +72,14 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [windows-latest, macos-latest]
+        os: [windows-latest, macos-11]
     steps:

       - uses: actions/checkout@v4

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version-file: 'go.mod'
           check-latest: true

       - name: Build nebula

@@ -97,8 +97,8 @@
       - name: End 2 end
         run: make e2evv

-      - uses: actions/upload-artifact@v4
+      - uses: actions/upload-artifact@v3
         with:
-          name: e2e packet flow ${{ matrix.os }}
-          path: e2e/mermaid/${{ matrix.os }}
+          name: e2e packet flow
+          path: e2e/mermaid/
           if-no-files-found: warn

CHANGELOG.md (155 changed lines)

@@ -7,151 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

-### Changed
-
-- Various dependency updates.
-
-## [1.9.7] - 2025-10-10
-
-### Security
-
-- Fix an issue where Nebula could incorrectly accept and process a packet from an erroneous source IP when the sender's
-  certificate is configured with unsafe_routes (cert v1/v2) or multiple IPs (cert v2). (#1494)
-
-### Changed
-
-- Disable sending `recv_error` messages when a packet is received outside the allowable counter window. (#1459)
-- Improve error messages and remove some unnecessary fatal conditions in the Windows and generic udp listener. (#1543)
-
-## [1.9.6] - 2025-7-15
-
-### Added
-
-- Support dropping inactive tunnels. This is disabled by default in this release but can be enabled with `tunnels.drop_inactive`. See example config for more details. (#1413)
-
-### Fixed
-
-- Fix Darwin freeze due to presence of some Network Extensions (#1426)
-- Ensure the same relay tunnel is always used when multiple relay tunnels are present (#1422)
-- Fix Windows freeze due to ICMP error handling (#1412)
-- Fix relay migration panic (#1403)
-
-## [1.9.5] - 2024-12-05
-
-### Added
-
-- Gracefully ignore v2 certificates. (#1282)
-
-### Fixed
-
-- Fix relays that refuse to re-establish after one of the remote tunnel pairs breaks. (#1277)
-
-## [1.9.4] - 2024-09-09
-
-### Added
-
-- Support UDP dialing with gVisor. (#1181)
-
-### Changed
-
-- Make some Nebula state programmatically available via control object. (#1188)
-- Switch internal representation of IPs to netip, to prepare for IPv6 support
-  in the overlay. (#1173)
-- Minor build and cleanup changes. (#1171, #1164, #1162)
-- Various dependency updates. (#1195, #1190, #1174, #1168, #1167, #1161, #1147, #1146)
-
-### Fixed
-
-- Fix a bug on big endian hosts, like mips. (#1194)
-- Fix a rare panic if a local index collision happens. (#1191)
-- Fix integer wraparound in the calculation of handshake timeouts on 32-bit targets. (#1185)
-
-## [1.9.3] - 2024-06-06
-
-### Fixed
-
-- Initialize messageCounter to 2 instead of verifying later. (#1156)
-
-## [1.9.2] - 2024-06-03
-
-### Fixed
-
-- Ensure messageCounter is set before handshake is complete. (#1154)
-
-## [1.9.1] - 2024-05-29
-
-### Fixed
-
-- Fixed a potential deadlock in GetOrHandshake. (#1151)
-
-## [1.9.0] - 2024-05-07
-
-### Deprecated
-
-- This release adds a new setting `default_local_cidr_any` that defaults to
-  true to match previous behavior, but will default to false in the next
-  release (1.10). When set to false, `local_cidr` is matched correctly for
-  firewall rules on hosts acting as unsafe routers, and should be set for any
-  firewall rules you want to allow unsafe route hosts to access. See the issue
-  and example config for more details. (#1071, #1099)
-
-### Added
-
-- Nebula now has an official Docker image `nebulaoss/nebula` that is
-  distroless and contains just the `nebula` and `nebula-cert` binaries. You
-  can find it here: https://hub.docker.com/r/nebulaoss/nebula (#1037)
-
-- Experimental binaries for `loong64` are now provided. (#1003)
-
-- Added example service script for OpenRC. (#711)
-
-- The SSH daemon now supports inlined host keys. (#1054)
-
-- The SSH daemon now supports certificates with `sshd.trusted_cas`. (#1098)
-
-### Changed
-
-- Config setting `tun.unsafe_routes` is now reloadable. (#1083)
-
-- Small documentation and internal improvements. (#1065, #1067, #1069, #1108,
-  #1109, #1111, #1135)
-
-- Various dependency updates. (#1139, #1138, #1134, #1133, #1126, #1123, #1110,
-  #1094, #1092, #1087, #1086, #1085, #1072, #1063, #1059, #1055, #1053, #1047,
-  #1046, #1034, #1022)
-
-### Removed
-
-- Support for the deprecated `local_range` option has been removed. Please
-  change to `preferred_ranges` (which is also now reloadable). (#1043)
-
-- We are now building with go1.22, which means that for Windows you need at
-  least Windows 10 or Windows Server 2016. This is because support for earlier
-  versions was removed in Go 1.21. See https://go.dev/doc/go1.21#windows (#981)
-
-- Removed vagrant example, as it was unmaintained. (#1129)
-
-- Removed Fedora and Arch nebula.service files, as they are maintained in the
-  upstream repos. (#1128, #1132)
-
-- Remove the TCP round trip tracking metrics, as they never had correct data
-  and were an experiment to begin with. (#1114)
-
-### Fixed
-
-- Fixed a potential deadlock introduced in 1.8.1. (#1112)
-
-- Fixed support for Linux when IPv6 has been disabled at the OS level. (#787)
-
-- DNS will return NXDOMAIN now when there are no results. (#845)
-
-- Allow `::` in `lighthouse.dns.host`. (#1115)
-
-- Capitalization of `NotAfter` fixed in DNS TXT response. (#1127)
-
-- Don't log invalid certificates. It is untrusted data and can cause a large
-  volume of logs. (#1116)
-
 ## [1.8.2] - 2024-01-08

 ### Fixed

@@ -703,15 +558,7 @@ created.)

 - Initial public release.

-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.7...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.8.2...HEAD
-[1.9.7]: https://github.com/slackhq/nebula/releases/tag/v1.9.7
-[1.9.6]: https://github.com/slackhq/nebula/releases/tag/v1.9.6
-[1.9.5]: https://github.com/slackhq/nebula/releases/tag/v1.9.5
-[1.9.4]: https://github.com/slackhq/nebula/releases/tag/v1.9.4
-[1.9.3]: https://github.com/slackhq/nebula/releases/tag/v1.9.3
-[1.9.2]: https://github.com/slackhq/nebula/releases/tag/v1.9.2
-[1.9.1]: https://github.com/slackhq/nebula/releases/tag/v1.9.1
-[1.9.0]: https://github.com/slackhq/nebula/releases/tag/v1.9.0
 [1.8.2]: https://github.com/slackhq/nebula/releases/tag/v1.8.2
 [1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
 [1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0

Unnamed file (name not captured in this view):

@@ -33,5 +33,6 @@ l.WithError(err).
   WithField("vpnIp", IntIp(hostinfo.hostId)).
   WithField("udpAddr", addr).
   WithField("handshake", m{"stage": 1, "style": "ix"}).
+  WithField("cert", remoteCert).
   Info("Invalid certificate from host")
 ```

Makefile (31 changed lines)

@@ -1,14 +1,22 @@
+GOMINVERSION = 1.20
 NEBULA_CMD_PATH = "./cmd/nebula"
+GO111MODULE = on
+export GO111MODULE
 CGO_ENABLED = 0
 export CGO_ENABLED

 # Set up OS specific bits
 ifeq ($(OS),Windows_NT)
+    #TODO: we should be able to ditch awk as well
+    GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
+    GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
     NEBULA_CMD_SUFFIX = .exe
     NULL_FILE = nul
     # RIO on windows does pointer stuff that makes go vet angry
     VET_FLAGS = -unsafeptr=false
 else
+    GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
+    GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
     NEBULA_CMD_SUFFIX =
     NULL_FILE = /dev/null
 endif

@@ -22,9 +30,6 @@ ifndef BUILD_NUMBER
     endif
 endif

-DOCKER_IMAGE_REPO ?= nebulaoss/nebula
-DOCKER_IMAGE_TAG ?= latest
-
 LDFLAGS = -X main.Build=$(BUILD_NUMBER)

 ALL_LINUX = linux-amd64 \

@@ -39,8 +44,7 @@ ALL_LINUX = linux-amd64 \
     linux-mips64 \
     linux-mips64le \
     linux-mips-softfloat \
-    linux-riscv64 \
-    linux-loong64
+    linux-riscv64

 ALL_FREEBSD = freebsd-amd64 \
     freebsd-arm64

@@ -78,12 +82,8 @@ e2evvvv: e2ev
 e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
 e2e-bench: e2e

-DOCKER_BIN = build/linux-amd64/nebula build/linux-amd64/nebula-cert
-
 all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)

-docker: docker/linux-$(shell go env GOARCH)
-
 release: $(ALL:%=build/nebula-%.tar.gz)

 release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)

@@ -133,8 +133,6 @@ build/linux-mips-softfloat/%: LDFLAGS += -s -w
 # boringcrypto
 build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
 build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
-build/linux-amd64-boringcrypto/%: LDFLAGS += -checklinkname=0
-build/linux-arm64-boringcrypto/%: LDFLAGS += -checklinkname=0

 build/%/nebula: .FORCE
     GOOS=$(firstword $(subst -, , $*)) \

@@ -158,9 +156,6 @@ build/nebula-%.tar.gz: build/%/nebula build/%/nebula-cert
 build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
     cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe

-docker/%: build/%/nebula build/%/nebula-cert
-    docker build . $(DOCKER_BUILD_ARGS) -f docker/Dockerfile --platform "$(subst -,/,$*)" --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:$(BUILD_NUMBER)"
-
 vet:
     go vet $(VET_FLAGS) -v ./...

@@ -168,7 +163,7 @@ test:
     go test -v ./...

 test-boringcrypto:
-    GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -ldflags "-checklinkname=0" -v ./...
+    GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./...

 test-cov-html:
     go test -coverprofile=coverage.out

@@ -224,10 +219,6 @@ smoke-docker-race: BUILD_ARGS = -race
 smoke-docker-race: CGO_ENABLED = 1
 smoke-docker-race: smoke-docker

-smoke-vagrant/%: bin-docker build/%/nebula
-    cd .github/workflows/smoke/ && ./build.sh $*
-    cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*
-
 .FORCE:
-.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
+.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html
 .DEFAULT_GOAL := bin

Unnamed file (name not captured in this view):

@@ -52,11 +52,6 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
     $ brew install nebula
     ```

-  - [Docker](https://hub.docker.com/r/nebulaoss/nebula)
-    ```
-    $ docker pull nebulaoss/nebula
-    ```
-
 #### Mobile

 - [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)

Unnamed file (name not captured in this view):

@@ -2,16 +2,17 @@ package nebula

 import (
     "fmt"
-    "net/netip"
+    "net"
     "regexp"

-    "github.com/gaissmai/bart"
+    "github.com/slackhq/nebula/cidr"
     "github.com/slackhq/nebula/config"
+    "github.com/slackhq/nebula/iputil"
 )

 type AllowList struct {
     // The values of this cidrTree are `bool`, signifying allow/deny
-    cidrTree *bart.Table[bool]
+    cidrTree *cidr.Tree6[bool]
 }

 type RemoteAllowList struct {

@@ -19,7 +20,7 @@ type RemoteAllowList struct {

     // Inside Range Specific, keys of this tree are inside CIDRs and values
     // are *AllowList
-    insideAllowLists *bart.Table[*AllowList]
+    insideAllowLists *cidr.Tree6[*AllowList]
 }

 type LocalAllowList struct {

@@ -87,7 +88,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
         return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
     }

-    tree := new(bart.Table[bool])
+    tree := cidr.NewTree6[bool]()

     // Keep track of the rules we have added for both ipv4 and ipv6
     type allowListRules struct {

@@ -121,20 +122,18 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
             return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
         }

-        ipNet, err := netip.ParsePrefix(rawCIDR)
+        _, ipNet, err := net.ParseCIDR(rawCIDR)
         if err != nil {
-            return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
+            return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
         }

-        ipNet = netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits())
-
         // TODO: should we error on duplicate CIDRs in the config?
-        tree.Insert(ipNet, value)
+        tree.AddCIDR(ipNet, value)

-        maskBits := ipNet.Bits()
+        maskBits, maskSize := ipNet.Mask.Size()

         var rules *allowListRules
-        if ipNet.Addr().Is4() {
+        if maskSize == 32 {
             rules = &rules4
         } else {
             rules = &rules6

@@ -157,7 +156,8 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in

     if !rules4.defaultSet {
         if rules4.allValuesMatch {
-            tree.Insert(netip.PrefixFrom(netip.IPv4Unspecified(), 0), !rules4.allValues)
+            _, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
+            tree.AddCIDR(zeroCIDR, !rules4.allValues)
         } else {
             return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
         }

@@ -165,7 +165,8 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in

     if !rules6.defaultSet {
         if rules6.allValuesMatch {
-            tree.Insert(netip.PrefixFrom(netip.IPv6Unspecified(), 0), !rules6.allValues)
+            _, zeroCIDR, _ := net.ParseCIDR("::/0")
+            tree.AddCIDR(zeroCIDR, !rules6.allValues)
         } else {
             return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
         }

@@ -217,13 +218,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
     return nameRules, nil
 }

-func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error) {
+func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6[*AllowList], error) {
     value := c.Get(k)
     if value == nil {
         return nil, nil
     }

-    remoteAllowRanges := new(bart.Table[*AllowList])
+    remoteAllowRanges := cidr.NewTree6[*AllowList]()

     rawMap, ok := value.(map[interface{}]interface{})
     if !ok {

@@ -240,27 +241,45 @@ func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error
             return nil, err
         }

-        ipNet, err := netip.ParsePrefix(rawCIDR)
+        _, ipNet, err := net.ParseCIDR(rawCIDR)
         if err != nil {
-            return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
+            return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
         }

-        remoteAllowRanges.Insert(netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits()), allowList)
+        remoteAllowRanges.AddCIDR(ipNet, allowList)
     }

     return remoteAllowRanges, nil
 }

-func (al *AllowList) Allow(ip netip.Addr) bool {
+func (al *AllowList) Allow(ip net.IP) bool {
     if al == nil {
         return true
     }

-    result, _ := al.cidrTree.Lookup(ip)
+    _, result := al.cidrTree.MostSpecificContains(ip)
     return result
 }

-func (al *LocalAllowList) Allow(ip netip.Addr) bool {
+func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
+    if al == nil {
+        return true
+    }
+
+    _, result := al.cidrTree.MostSpecificContainsIpV4(ip)
+    return result
+}
+
+func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
+    if al == nil {
+        return true
+    }
+
+    _, result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
+    return result
+}
+
+func (al *LocalAllowList) Allow(ip net.IP) bool {
     if al == nil {
         return true
     }

@@ -282,23 +301,43 @@ func (al *LocalAllowList) AllowName(name string) bool {
     return !al.nameRules[0].Allow
 }

-func (al *RemoteAllowList) AllowUnknownVpnIp(ip netip.Addr) bool {
+func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool {
     if al == nil {
         return true
     }
     return al.AllowList.Allow(ip)
 }

-func (al *RemoteAllowList) Allow(vpnIp netip.Addr, ip netip.Addr) bool {
+func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool {
     if !al.getInsideAllowList(vpnIp).Allow(ip) {
         return false
     }
     return al.AllowList.Allow(ip)
 }

-func (al *RemoteAllowList) getInsideAllowList(vpnIp netip.Addr) *AllowList {
+func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool {
+    if al == nil {
+        return true
+    }
+    if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
+        return false
+    }
+    return al.AllowList.AllowIpV4(ip)
+}
+
+func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
+    if al == nil {
+        return true
+    }
+    if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
+        return false
+    }
+    return al.AllowList.AllowIpV6(hi, lo)
+}
+
+func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
     if al.insideAllowLists != nil {
-        inside, ok := al.insideAllowLists.Lookup(vpnIp)
+        ok, inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
         if ok {
             return inside
         }

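Side note on the tree types swapped above: the '-' side stores allow/deny rules in gaissmai/bart's generic routing table keyed by netip.Prefix, while the '+' side uses the in-repo cidr.Tree6 keyed by *net.IPNet, which is why every Insert/Lookup call pairs with an AddCIDR/MostSpecificContains counterpart. A minimal, self-contained sketch of the bart-based style, using only the calls that appear in the hunks; the prefixes and the main function are illustrative, not taken from the repository:

package main

import (
    "fmt"
    "net/netip"

    "github.com/gaissmai/bart"
)

func main() {
    // Longest-prefix-match table of allow/deny decisions, mirroring AllowList.cidrTree.
    tree := new(bart.Table[bool])
    tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)   // default: allow
    tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)  // deny the 10/8 block
    tree.Insert(netip.MustParsePrefix("10.42.0.0/16"), true) // re-allow a narrower range

    for _, s := range []string{"1.1.1.1", "10.0.0.4", "10.42.42.42"} {
        allowed, _ := tree.Lookup(netip.MustParseAddr(s)) // most specific prefix wins
        fmt.Printf("%s allowed=%v\n", s, allowed)
    }
}

The cidr.Tree6 counterpart returns its results in the opposite order (found flag first, value second), which is why the Allow methods flip from `result, _ := al.cidrTree.Lookup(ip)` to `_, result := al.cidrTree.MostSpecificContains(ip)` across the two sides.
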
Unnamed file (name not captured in this view):

@@ -1,11 +1,11 @@
 package nebula

 import (
-    "net/netip"
+    "net"
     "regexp"
     "testing"

-    "github.com/gaissmai/bart"
+    "github.com/slackhq/nebula/cidr"
     "github.com/slackhq/nebula/config"
     "github.com/slackhq/nebula/test"
     "github.com/stretchr/testify/assert"

@@ -18,7 +18,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
         "192.168.0.0": true,
     }
     r, err := newAllowListFromConfig(c, "allowlist", nil)
-    assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
+    assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
     assert.Nil(t, r)

     c.Settings["allowlist"] = map[interface{}]interface{}{

@@ -98,26 +98,26 @@ func TestNewAllowListFromConfig(t *testing.T) {
 }

 func TestAllowList_Allow(t *testing.T) {
-    assert.Equal(t, true, ((*AllowList)(nil)).Allow(netip.MustParseAddr("1.1.1.1")))
+    assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))

-    tree := new(bart.Table[bool])
-    tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)
-    tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)
-    tree.Insert(netip.MustParsePrefix("10.42.42.42/32"), true)
-    tree.Insert(netip.MustParsePrefix("10.42.0.0/16"), true)
-    tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), true)
-    tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), false)
-    tree.Insert(netip.MustParsePrefix("::1/128"), true)
-    tree.Insert(netip.MustParsePrefix("::2/128"), false)
+    tree := cidr.NewTree6[bool]()
+    tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
+    tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
+    tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
+    tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true)
+    tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true)
+    tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false)
+    tree.AddCIDR(cidr.Parse("::1/128"), true)
+    tree.AddCIDR(cidr.Parse("::2/128"), false)
     al := &AllowList{cidrTree: tree}

-    assert.Equal(t, true, al.Allow(netip.MustParseAddr("1.1.1.1")))
-    assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.0.0.4")))
-    assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.42.42")))
-    assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.42.42.41")))
-    assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.0.1")))
-    assert.Equal(t, true, al.Allow(netip.MustParseAddr("::1")))
-    assert.Equal(t, false, al.Allow(netip.MustParseAddr("::2")))
+    assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1")))
+    assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4")))
+    assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42")))
+    assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41")))
+    assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1")))
+    assert.Equal(t, true, al.Allow(net.ParseIP("::1")))
+    assert.Equal(t, false, al.Allow(net.ParseIP("::2")))
 }

 func TestLocalAllowList_AllowName(t *testing.T) {

Unnamed file (name not captured in this view):

@@ -1,36 +1,41 @@
 package nebula

 import (
-    "encoding/binary"
     "fmt"
     "math"
     "net"
-    "net/netip"
     "strconv"

-    "github.com/gaissmai/bart"
+    "github.com/slackhq/nebula/cidr"
     "github.com/slackhq/nebula/config"
+    "github.com/slackhq/nebula/iputil"
 )

 // This allows us to "guess" what the remote might be for a host while we wait
 // for the lighthouse response. See "lighthouse.calculated_remotes" in the
 // example config file.
 type calculatedRemote struct {
-    ipNet netip.Prefix
-    mask  netip.Prefix
-    port  uint32
+    ipNet  net.IPNet
+    maskIP iputil.VpnIp
+    mask   iputil.VpnIp
+    port   uint32
 }

-func newCalculatedRemote(maskCidr netip.Prefix, port int) (*calculatedRemote, error) {
-    masked := maskCidr.Masked()
+func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) {
+    // Ensure this is an IPv4 mask that we expect
+    ones, bits := ipNet.Mask.Size()
+    if ones == 0 || bits != 32 {
+        return nil, fmt.Errorf("invalid mask: %v", ipNet)
+    }
     if port < 0 || port > math.MaxUint16 {
         return nil, fmt.Errorf("invalid port: %d", port)
     }

     return &calculatedRemote{
-        ipNet: maskCidr,
-        mask:  masked,
-        port:  uint32(port),
+        ipNet:  *ipNet,
+        maskIP: iputil.Ip2VpnIp(ipNet.IP),
+        mask:   iputil.Ip2VpnIp(ipNet.Mask),
+        port:   uint32(port),
     }, nil
 }

@@ -38,41 +43,21 @@ func (c *calculatedRemote) String() string {
     return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
 }

-func (c *calculatedRemote) Apply(ip netip.Addr) *Ip4AndPort {
+func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
     // Combine the masked bytes of the "mask" IP with the unmasked bytes
     // of the overlay IP
-    if c.ipNet.Addr().Is4() {
-        return c.apply4(ip)
-    }
-    return c.apply6(ip)
-}
-
-func (c *calculatedRemote) apply4(ip netip.Addr) *Ip4AndPort {
-    //TODO: IPV6-WORK this can be less crappy
-    maskb := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
-    mask := binary.BigEndian.Uint32(maskb[:])
-
-    b := c.mask.Addr().As4()
-    maskIp := binary.BigEndian.Uint32(b[:])
-
-    b = ip.As4()
-    intIp := binary.BigEndian.Uint32(b[:])
-
-    return &Ip4AndPort{(maskIp & mask) | (intIp & ^mask), c.port}
-}
-
-func (c *calculatedRemote) apply6(ip netip.Addr) *Ip4AndPort {
-    //TODO: IPV6-WORK
-    panic("Can not calculate ipv6 remote addresses")
-}
+    masked := (c.maskIP & c.mask) | (ip & ^c.mask)
+    return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
+}

-func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calculatedRemote], error) {
+func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4[[]*calculatedRemote], error) {
     value := c.Get(k)
     if value == nil {
         return nil, nil
     }

-    calculatedRemotes := new(bart.Table[[]*calculatedRemote])
+    calculatedRemotes := cidr.NewTree4[[]*calculatedRemote]()

     rawMap, ok := value.(map[any]any)
     if !ok {

@@ -84,18 +69,17 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calcu
         return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
     }

-        cidr, err := netip.ParsePrefix(rawCIDR)
+        _, ipNet, err := net.ParseCIDR(rawCIDR)
         if err != nil {
             return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
         }

-        //TODO: IPV6-WORK this does not verify that rawValue contains the same bits as cidr here
         entry, err := newCalculatedRemotesListFromConfig(rawValue)
         if err != nil {
             return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
         }

-        calculatedRemotes.Insert(cidr, entry)
+        calculatedRemotes.AddCIDR(ipNet, entry)
     }

     return calculatedRemotes, nil

@@ -133,7 +117,7 @@ func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
     if !ok {
         return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
     }
-    maskCidr, err := netip.ParsePrefix(rawMask)
+    _, ipNet, err := net.ParseCIDR(rawMask)
     if err != nil {
         return nil, fmt.Errorf("invalid mask: %s", rawMask)
     }

@@ -155,5 +139,5 @@ func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
         return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
     }

-    return newCalculatedRemote(maskCidr, port)
+    return newCalculatedRemote(ipNet, port)
 }

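The Apply change above is purely representational: both sides keep the network bits of the configured mask address and the host bits of the overlay IP, one side doing it directly on a 32-bit VpnIp and the other going through binary.BigEndian on netip values. A standalone sketch of that arithmetic using only the standard library; calcRemote is an illustrative helper, and the sample addresses mirror the test case that follows in this diff:

package main

import (
    "encoding/binary"
    "fmt"
    "net"
)

// calcRemote keeps the masked (network) bits of the mask address and the
// unmasked (host) bits of the overlay IP, which is what calculatedRemote.Apply does.
func calcRemote(maskCIDR string, overlay net.IP) net.IP {
    _, ipNet, err := net.ParseCIDR(maskCIDR)
    if err != nil {
        panic(err)
    }
    mask := binary.BigEndian.Uint32(ipNet.Mask)
    maskIP := binary.BigEndian.Uint32(ipNet.IP.To4())
    ip := binary.BigEndian.Uint32(overlay.To4())

    out := (maskIP & mask) | (ip & ^mask)
    res := make(net.IP, 4)
    binary.BigEndian.PutUint32(res, out)
    return res
}

func main() {
    // Mirrors TestCalculatedRemoteApply: 10.0.10.182 under 192.168.1.0/24 -> 192.168.1.182
    fmt.Println(calcRemote("192.168.1.0/24", net.ParseIP("10.0.10.182")))
}
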
Unnamed file (name not captured in this view):

@@ -1,25 +1,27 @@
 package nebula

 import (
-    "net/netip"
+    "net"
     "testing"

+    "github.com/slackhq/nebula/iputil"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )

 func TestCalculatedRemoteApply(t *testing.T) {
-    ipNet, err := netip.ParsePrefix("192.168.1.0/24")
+    _, ipNet, err := net.ParseCIDR("192.168.1.0/24")
     require.NoError(t, err)

     c, err := newCalculatedRemote(ipNet, 4242)
     require.NoError(t, err)

-    input, err := netip.ParseAddr("10.0.10.182")
-    assert.NoError(t, err)
+    input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182})

-    expected, err := netip.ParseAddr("192.168.1.182")
-    assert.NoError(t, err)
+    expected := &Ip4AndPort{
+        Ip:   uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})),
+        Port: 4242,
+    }

-    assert.Equal(t, NewIp4AndPortFromNetIP(expected, 4242), c.Apply(input))
+    assert.Equal(t, expected, c.Apply(input))
 }

cert/ca.go (28 changed lines)

@@ -24,39 +24,31 @@ func NewCAPool() *NebulaCAPool {

 // NewCAPoolFromBytes will create a new CA pool from the provided
 // input bytes, which must be a PEM-encoded set of nebula certificates.
-// If the pool contains unsupported certificates, they will generate warnings
-// in the []error return arg.
 // If the pool contains any expired certificates, an ErrExpired will be
 // returned along with the pool. The caller must handle any such errors.
-func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, []error, error) {
+func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) {
     pool := NewCAPool()
     var err error
-    var warnings []error
-    good := 0
+    var expired bool

     for {
         caPEMs, err = pool.AddCACertificate(caPEMs)
         if errors.Is(err, ErrExpired) {
-            warnings = append(warnings, err)
-        } else if errors.Is(err, ErrInvalidPEMCertificateUnsupported) {
-            warnings = append(warnings, err)
-        } else if err != nil {
-            return nil, warnings, err
-        } else {
-            // Only consider a good certificate if there were no errors present
-            good++
+            expired = true
+            err = nil
+        }
+        if err != nil {
+            return nil, err
         }

         if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
             break
         }
     }

-    if good == 0 {
-        return nil, warnings, errors.New("no valid CA certificates present")
+    if expired {
+        return pool, ErrExpired
     }

-    return pool, warnings, nil
+    return pool, nil
 }

 // AddCACertificate verifies a Nebula CA certificate and adds it to the pool

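The ca.go hunk is effectively an API reshape of NewCAPoolFromBytes: on the '-' side it returns the pool plus a slice of non-fatal warnings (expired or unsupported CAs) and only errors out when no valid CA remains, while on the '+' side it returns a single error and signals expiry by handing back ErrExpired together with a still-usable pool. A hedged sketch of a caller written against the warnings-returning shape; the ca.crt path and the logging are illustrative, not from the repository:

package main

import (
    "log"
    "os"

    "github.com/slackhq/nebula/cert"
)

func main() {
    // Hypothetical CA bundle path; NewCAPoolFromBytes expects PEM-encoded CA certs.
    pemBytes, err := os.ReadFile("ca.crt")
    if err != nil {
        log.Fatal(err)
    }

    // Warnings (expired or unsupported CAs) come back separately; a hard
    // error means no usable CA remained in the bundle.
    pool, warnings, err := cert.NewCAPoolFromBytes(pemBytes)
    if err != nil {
        log.Fatal(err) // e.g. "no valid CA certificates present"
    }
    for _, w := range warnings {
        log.Printf("ca warning: %v", w)
    }
    log.Printf("loaded %d CA(s)", len(pool.CAs))
}

Against the single-error shape on the '+' side, the same caller would instead write `pool, err := cert.NewCAPoolFromBytes(pemBytes)` and tolerate `errors.Is(err, cert.ErrExpired)` explicitly, since that error is returned alongside a usable pool.
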
@@ -28,7 +28,6 @@ const publicKeyLen = 32
 
 const (
 	CertBanner                       = "NEBULA CERTIFICATE"
-	CertificateV2Banner              = "NEBULA CERTIFICATE V2"
 	X25519PrivateKeyBanner           = "NEBULA X25519 PRIVATE KEY"
 	X25519PublicKeyBanner            = "NEBULA X25519 PUBLIC KEY"
 	EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
@@ -164,9 +163,6 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, error) {
 	if p == nil {
 		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
 	}
-	if p.Type == CertificateV2Banner {
-		return nil, r, fmt.Errorf("%w: %s", ErrInvalidPEMCertificateUnsupported, p.Type)
-	}
 	if p.Type != CertBanner {
 		return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
 	}
@@ -328,7 +324,7 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
 	return k.Bytes, r, nil
 }
 
-// UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of a nebula cert into its
+// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert into its
 // protobuf-generated struct.
 func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
 	if len(b) == 0 {
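The removed V2 check relies on %w wrapping so callers can detect the sentinel with errors.Is. A small self-contained sketch of that pattern; the sentinel is redeclared locally here purely for illustration.

package main

import (
    "errors"
    "fmt"
)

// Illustrative stand-in for the package-level sentinel used above.
var errUnsupported = errors.New("bytes contain an unsupported certificate format")

func main() {
    // Wrapping with %w keeps the sentinel in the error chain.
    err := fmt.Errorf("%w: %s", errUnsupported, "NEBULA CERTIFICATE V2")

    fmt.Println(errors.Is(err, errUnsupported)) // true
    fmt.Println(err)                            // bytes contain an unsupported certificate format: NEBULA CERTIFICATE V2
}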
@@ -5,7 +5,6 @@ import (
 	"crypto/ecdsa"
 	"crypto/elliptic"
 	"crypto/rand"
-	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -573,13 +572,6 @@ CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
 76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
 IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
 -----END NEBULA CERTIFICATE-----
-`
-
-	v2 := `
-# valid PEM with the V2 header
------BEGIN NEBULA CERTIFICATE V2-----
-CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
------END NEBULA CERTIFICATE V2-----
 `
 
 	rootCA := NebulaCertificate{
@@ -600,46 +592,33 @@ CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
 		},
 	}
 
-	p, warn, err := NewCAPoolFromBytes([]byte(noNewLines))
+	p, err := NewCAPoolFromBytes([]byte(noNewLines))
 	assert.Nil(t, err)
-	assert.Nil(t, warn)
 	assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
 	assert.Equal(t, p.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 
-	pp, warn, err := NewCAPoolFromBytes([]byte(withNewLines))
+	pp, err := NewCAPoolFromBytes([]byte(withNewLines))
 	assert.Nil(t, err)
-	assert.Nil(t, warn)
 	assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
 	assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 
 	// expired cert, no valid certs
-	ppp, warn, err := NewCAPoolFromBytes([]byte(expired))
-	assert.Error(t, err, "no valid CA certificates present")
-	assert.Len(t, warn, 1)
-	assert.Error(t, warn[0], ErrExpired)
-	assert.Nil(t, ppp)
+	ppp, err := NewCAPoolFromBytes([]byte(expired))
+	assert.Equal(t, ErrExpired, err)
+	assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
 
 	// expired cert, with valid certs
-	pppp, warn, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
-	assert.Len(t, warn, 1)
-	assert.Nil(t, err)
-	assert.Error(t, warn[0], ErrExpired)
+	pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
+	assert.Equal(t, ErrExpired, err)
 	assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
 	assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 	assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
 	assert.Equal(t, len(pppp.CAs), 3)
 
-	ppppp, warn, err := NewCAPoolFromBytes([]byte(p256))
+	ppppp, err := NewCAPoolFromBytes([]byte(p256))
 	assert.Nil(t, err)
-	assert.Nil(t, warn)
 	assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
 	assert.Equal(t, len(ppppp.CAs), 1)
-
-	pppppp, warn, err := NewCAPoolFromBytes(append([]byte(p256), []byte(v2)...))
-	assert.Nil(t, err)
-	assert.True(t, errors.Is(warn[0], ErrInvalidPEMCertificateUnsupported))
-	assert.Equal(t, pppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
-	assert.Equal(t, len(pppppp.CAs), 1)
 }
 
 func appendByteSlices(b ...[]byte) []byte {
@@ -5,11 +5,10 @@ import (
 )
 
 var (
 	ErrRootExpired       = errors.New("root certificate is expired")
 	ErrExpired           = errors.New("certificate is expired")
 	ErrNotCA             = errors.New("certificate is not a CA")
 	ErrNotSelfSigned     = errors.New("certificate is not self-signed")
 	ErrBlockListed       = errors.New("certificate is in the block list")
 	ErrSignatureMismatch = errors.New("certificate signature did not match")
-	ErrInvalidPEMCertificateUnsupported = errors.New("bytes contain an unsupported certificate format")
 )
cidr/parse.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package cidr

import "net"

// Parse is a convenience function that returns only the IPNet
// This function ignores errors since it is primarily a test helper, the result could be nil
func Parse(s string) *net.IPNet {
    _, c, _ := net.ParseCIDR(s)
    return c
}
cidr/tree4.go (new file, 203 lines)
@@ -0,0 +1,203 @@
package cidr

import (
    "net"

    "github.com/slackhq/nebula/iputil"
)

type Node[T any] struct {
    left     *Node[T]
    right    *Node[T]
    parent   *Node[T]
    hasValue bool
    value    T
}

type entry[T any] struct {
    CIDR  *net.IPNet
    Value T
}

type Tree4[T any] struct {
    root *Node[T]
    list []entry[T]
}

const (
    startbit = iputil.VpnIp(0x80000000)
)

func NewTree4[T any]() *Tree4[T] {
    tree := new(Tree4[T])
    tree.root = &Node[T]{}
    tree.list = []entry[T]{}
    return tree
}

func (tree *Tree4[T]) AddCIDR(cidr *net.IPNet, val T) {
    bit := startbit
    node := tree.root
    next := tree.root

    ip := iputil.Ip2VpnIp(cidr.IP)
    mask := iputil.Ip2VpnIp(cidr.Mask)

    // Find our last ancestor in the tree
    for bit&mask != 0 {
        if ip&bit != 0 {
            next = node.right
        } else {
            next = node.left
        }

        if next == nil {
            break
        }

        bit = bit >> 1
        node = next
    }

    // We already have this range so update the value
    if next != nil {
        addCIDR := cidr.String()
        for i, v := range tree.list {
            if addCIDR == v.CIDR.String() {
                tree.list = append(tree.list[:i], tree.list[i+1:]...)
                break
            }
        }

        tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
        node.value = val
        node.hasValue = true
        return
    }

    // Build up the rest of the tree we don't already have
    for bit&mask != 0 {
        next = &Node[T]{}
        next.parent = node

        if ip&bit != 0 {
            node.right = next
        } else {
            node.left = next
        }

        bit >>= 1
        node = next
    }

    // Final node marks our cidr, set the value
    node.value = val
    node.hasValue = true
    tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
}

// Contains finds the first match, which may be the least specific
func (tree *Tree4[T]) Contains(ip iputil.VpnIp) (ok bool, value T) {
    bit := startbit
    node := tree.root

    for node != nil {
        if node.hasValue {
            return true, node.value
        }

        if ip&bit != 0 {
            node = node.right
        } else {
            node = node.left
        }

        bit >>= 1

    }

    return false, value
}

// MostSpecificContains finds the most specific match
func (tree *Tree4[T]) MostSpecificContains(ip iputil.VpnIp) (ok bool, value T) {
    bit := startbit
    node := tree.root

    for node != nil {
        if node.hasValue {
            value = node.value
            ok = true
        }

        if ip&bit != 0 {
            node = node.right
        } else {
            node = node.left
        }

        bit >>= 1
    }

    return ok, value
}

type eachFunc[T any] func(T) bool

// EachContains will call a function, passing the value, for each entry until the function returns true or the search is complete
// The final return value will be true if the provided function returned true
func (tree *Tree4[T]) EachContains(ip iputil.VpnIp, each eachFunc[T]) bool {
    bit := startbit
    node := tree.root

    for node != nil {
        if node.hasValue {
            // If the each func returns true then we can exit the loop
            if each(node.value) {
                return true
            }
        }

        if ip&bit != 0 {
            node = node.right
        } else {
            node = node.left
        }

        bit >>= 1
    }

    return false
}

// GetCIDR returns the entry added by the most recent matching AddCIDR call
func (tree *Tree4[T]) GetCIDR(cidr *net.IPNet) (ok bool, value T) {
    bit := startbit
    node := tree.root

    ip := iputil.Ip2VpnIp(cidr.IP)
    mask := iputil.Ip2VpnIp(cidr.Mask)

    // Find our last ancestor in the tree
    for node != nil && bit&mask != 0 {
        if ip&bit != 0 {
            node = node.right
        } else {
            node = node.left
        }

        bit = bit >> 1
    }

    if bit&mask == 0 && node != nil {
        value = node.value
        ok = node.hasValue
    }

    return ok, value
}

// List will return all CIDRs and their current values. Do not modify the contents!
func (tree *Tree4[T]) List() []entry[T] {
    return tree.list
}
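Before the tests below, a minimal usage sketch of the generic Tree4 added above; import paths follow this branch, while the prefixes and values are purely illustrative.

package main

import (
    "fmt"
    "net"

    "github.com/slackhq/nebula/cidr"
    "github.com/slackhq/nebula/iputil"
)

func main() {
    tree := cidr.NewTree4[string]()
    tree.AddCIDR(cidr.Parse("10.0.0.0/8"), "wide")
    tree.AddCIDR(cidr.Parse("10.1.2.0/24"), "narrow")

    ip := iputil.Ip2VpnIp(net.ParseIP("10.1.2.3"))

    // Contains stops at the first node that carries a value (least specific).
    ok, v := tree.Contains(ip)
    fmt.Println(ok, v) // true wide

    // MostSpecificContains keeps walking and returns the deepest value seen.
    ok, v = tree.MostSpecificContains(ip)
    fmt.Println(ok, v) // true narrow
}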
cidr/tree4_test.go (new file, 170 lines)
@@ -0,0 +1,170 @@
package cidr

import (
    "net"
    "testing"

    "github.com/slackhq/nebula/iputil"
    "github.com/stretchr/testify/assert"
)

func TestCIDRTree_List(t *testing.T) {
    tree := NewTree4[string]()
    tree.AddCIDR(Parse("1.0.0.0/16"), "1")
    tree.AddCIDR(Parse("1.0.0.0/8"), "2")
    tree.AddCIDR(Parse("1.0.0.0/16"), "3")
    tree.AddCIDR(Parse("1.0.0.0/16"), "4")
    list := tree.List()
    assert.Len(t, list, 2)
    assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
    assert.Equal(t, "2", list[0].Value)
    assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
    assert.Equal(t, "4", list[1].Value)
}

func TestCIDRTree_Contains(t *testing.T) {
    tree := NewTree4[string]()
    tree.AddCIDR(Parse("1.0.0.0/8"), "1")
    tree.AddCIDR(Parse("2.1.0.0/16"), "2")
    tree.AddCIDR(Parse("3.1.1.0/24"), "3")
    tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
    tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
    tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
    tree.AddCIDR(Parse("254.0.0.0/4"), "5")

    tests := []struct {
        Found  bool
        Result interface{}
        IP     string
    }{
        {true, "1", "1.0.0.0"},
        {true, "1", "1.255.255.255"},
        {true, "2", "2.1.0.0"},
        {true, "2", "2.1.255.255"},
        {true, "3", "3.1.1.0"},
        {true, "3", "3.1.1.255"},
        {true, "4a", "4.1.1.255"},
        {true, "4a", "4.1.1.1"},
        {true, "5", "240.0.0.0"},
        {true, "5", "255.255.255.255"},
        {false, "", "239.0.0.0"},
        {false, "", "4.1.2.2"},
    }

    for _, tt := range tests {
        ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
        assert.Equal(t, tt.Found, ok)
        assert.Equal(t, tt.Result, r)
    }

    tree = NewTree4[string]()
    tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
    ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
    assert.True(t, ok)
    assert.Equal(t, "cool", r)

    ok, r = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
    assert.True(t, ok)
    assert.Equal(t, "cool", r)
}

func TestCIDRTree_MostSpecificContains(t *testing.T) {
    tree := NewTree4[string]()
    tree.AddCIDR(Parse("1.0.0.0/8"), "1")
    tree.AddCIDR(Parse("2.1.0.0/16"), "2")
    tree.AddCIDR(Parse("3.1.1.0/24"), "3")
    tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
    tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
    tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
    tree.AddCIDR(Parse("254.0.0.0/4"), "5")

    tests := []struct {
        Found  bool
        Result interface{}
        IP     string
    }{
        {true, "1", "1.0.0.0"},
        {true, "1", "1.255.255.255"},
        {true, "2", "2.1.0.0"},
        {true, "2", "2.1.255.255"},
        {true, "3", "3.1.1.0"},
        {true, "3", "3.1.1.255"},
        {true, "4a", "4.1.1.255"},
        {true, "4b", "4.1.1.2"},
        {true, "4c", "4.1.1.1"},
        {true, "5", "240.0.0.0"},
        {true, "5", "255.255.255.255"},
        {false, "", "239.0.0.0"},
        {false, "", "4.1.2.2"},
    }

    for _, tt := range tests {
        ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
        assert.Equal(t, tt.Found, ok)
        assert.Equal(t, tt.Result, r)
    }

    tree = NewTree4[string]()
    tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
    ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
    assert.True(t, ok)
    assert.Equal(t, "cool", r)

    ok, r = tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
    assert.True(t, ok)
    assert.Equal(t, "cool", r)
}

func TestTree4_GetCIDR(t *testing.T) {
    tree := NewTree4[string]()
    tree.AddCIDR(Parse("1.0.0.0/8"), "1")
    tree.AddCIDR(Parse("2.1.0.0/16"), "2")
    tree.AddCIDR(Parse("3.1.1.0/24"), "3")
    tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
    tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
    tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
    tree.AddCIDR(Parse("254.0.0.0/4"), "5")

    tests := []struct {
        Found  bool
        Result interface{}
        IPNet  *net.IPNet
    }{
        {true, "1", Parse("1.0.0.0/8")},
        {true, "2", Parse("2.1.0.0/16")},
        {true, "3", Parse("3.1.1.0/24")},
        {true, "4a", Parse("4.1.1.0/24")},
        {true, "4b", Parse("4.1.1.1/32")},
        {true, "4c", Parse("4.1.2.1/32")},
        {true, "5", Parse("254.0.0.0/4")},
        {false, "", Parse("2.0.0.0/8")},
    }

    for _, tt := range tests {
        ok, r := tree.GetCIDR(tt.IPNet)
        assert.Equal(t, tt.Found, ok)
        assert.Equal(t, tt.Result, r)
    }
}

func BenchmarkCIDRTree_Contains(b *testing.B) {
    tree := NewTree4[string]()
    tree.AddCIDR(Parse("1.1.0.0/16"), "1")
    tree.AddCIDR(Parse("1.2.1.1/32"), "1")
    tree.AddCIDR(Parse("192.2.1.1/32"), "1")
    tree.AddCIDR(Parse("172.2.1.1/32"), "1")

    ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
    b.Run("found", func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            tree.Contains(ip)
        }
    })

    ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
    b.Run("not found", func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            tree.Contains(ip)
        }
    })
}
cidr/tree6.go (new file, 189 lines)
@@ -0,0 +1,189 @@
package cidr

import (
    "net"

    "github.com/slackhq/nebula/iputil"
)

const startbit6 = uint64(1 << 63)

type Tree6[T any] struct {
    root4 *Node[T]
    root6 *Node[T]
}

func NewTree6[T any]() *Tree6[T] {
    tree := new(Tree6[T])
    tree.root4 = &Node[T]{}
    tree.root6 = &Node[T]{}
    return tree
}

func (tree *Tree6[T]) AddCIDR(cidr *net.IPNet, val T) {
    var node, next *Node[T]

    cidrIP, ipv4 := isIPV4(cidr.IP)
    if ipv4 {
        node = tree.root4
        next = tree.root4

    } else {
        node = tree.root6
        next = tree.root6
    }

    for i := 0; i < len(cidrIP); i += 4 {
        ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
        mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
        bit := startbit

        // Find our last ancestor in the tree
        for bit&mask != 0 {
            if ip&bit != 0 {
                next = node.right
            } else {
                next = node.left
            }

            if next == nil {
                break
            }

            bit = bit >> 1
            node = next
        }

        // Build up the rest of the tree we don't already have
        for bit&mask != 0 {
            next = &Node[T]{}
            next.parent = node

            if ip&bit != 0 {
                node.right = next
            } else {
                node.left = next
            }

            bit >>= 1
            node = next
        }
    }

    // Final node marks our cidr, set the value
    node.value = val
    node.hasValue = true
}

// Finds the most specific match
func (tree *Tree6[T]) MostSpecificContains(ip net.IP) (ok bool, value T) {
    var node *Node[T]

    wholeIP, ipv4 := isIPV4(ip)
    if ipv4 {
        node = tree.root4
    } else {
        node = tree.root6
    }

    for i := 0; i < len(wholeIP); i += 4 {
        ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
        bit := startbit

        for node != nil {
            if node.hasValue {
                value = node.value
                ok = true
            }

            if bit == 0 {
                break
            }

            if ip&bit != 0 {
                node = node.right
            } else {
                node = node.left
            }

            bit >>= 1
        }
    }

    return ok, value
}

func (tree *Tree6[T]) MostSpecificContainsIpV4(ip iputil.VpnIp) (ok bool, value T) {
    bit := startbit
    node := tree.root4

    for node != nil {
        if node.hasValue {
            value = node.value
            ok = true
        }

        if ip&bit != 0 {
            node = node.right
        } else {
            node = node.left
        }

        bit >>= 1
    }

    return ok, value
}

func (tree *Tree6[T]) MostSpecificContainsIpV6(hi, lo uint64) (ok bool, value T) {
    ip := hi
    node := tree.root6

    for i := 0; i < 2; i++ {
        bit := startbit6

        for node != nil {
            if node.hasValue {
                value = node.value
                ok = true
            }

            if bit == 0 {
                break
            }

            if ip&bit != 0 {
                node = node.right
            } else {
                node = node.left
            }

            bit >>= 1
        }

        ip = lo
    }

    return ok, value
}

func isIPV4(ip net.IP) (net.IP, bool) {
    if len(ip) == net.IPv4len {
        return ip, true
    }

    if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
        return ip[12:16], true
    }

    return ip, false
}

func isZeros(p net.IP) bool {
    for i := 0; i < len(p); i++ {
        if p[i] != 0 {
            return false
        }
    }
    return true
}
cidr/tree6_test.go (new file, 98 lines)
@@ -0,0 +1,98 @@
package cidr

import (
    "encoding/binary"
    "net"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
    tree := NewTree6[string]()
    tree.AddCIDR(Parse("1.0.0.0/8"), "1")
    tree.AddCIDR(Parse("2.1.0.0/16"), "2")
    tree.AddCIDR(Parse("3.1.1.0/24"), "3")
    tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
    tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
    tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
    tree.AddCIDR(Parse("254.0.0.0/4"), "5")
    tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
    tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
    tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

    tests := []struct {
        Found  bool
        Result interface{}
        IP     string
    }{
        {true, "1", "1.0.0.0"},
        {true, "1", "1.255.255.255"},
        {true, "2", "2.1.0.0"},
        {true, "2", "2.1.255.255"},
        {true, "3", "3.1.1.0"},
        {true, "3", "3.1.1.255"},
        {true, "4a", "4.1.1.255"},
        {true, "4b", "4.1.1.2"},
        {true, "4c", "4.1.1.1"},
        {true, "5", "240.0.0.0"},
        {true, "5", "255.255.255.255"},
        {true, "6a", "1:2:0:4:1:1:1:1"},
        {true, "6b", "1:2:0:4:5:1:1:1"},
        {true, "6c", "1:2:0:4:5:0:0:0"},
        {false, "", "239.0.0.0"},
        {false, "", "4.1.2.2"},
    }

    for _, tt := range tests {
        ok, r := tree.MostSpecificContains(net.ParseIP(tt.IP))
        assert.Equal(t, tt.Found, ok)
        assert.Equal(t, tt.Result, r)
    }

    tree = NewTree6[string]()
    tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
    tree.AddCIDR(Parse("::/0"), "cool6")
    ok, r := tree.MostSpecificContains(net.ParseIP("0.0.0.0"))
    assert.True(t, ok)
    assert.Equal(t, "cool", r)

    ok, r = tree.MostSpecificContains(net.ParseIP("255.255.255.255"))
    assert.True(t, ok)
    assert.Equal(t, "cool", r)

    ok, r = tree.MostSpecificContains(net.ParseIP("::"))
    assert.True(t, ok)
    assert.Equal(t, "cool6", r)

    ok, r = tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8"))
    assert.True(t, ok)
    assert.Equal(t, "cool6", r)
}

func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
    tree := NewTree6[string]()
    tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
    tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
    tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

    tests := []struct {
        Found  bool
        Result interface{}
        IP     string
    }{
        {true, "6a", "1:2:0:4:1:1:1:1"},
        {true, "6b", "1:2:0:4:5:1:1:1"},
        {true, "6c", "1:2:0:4:5:0:0:0"},
    }

    for _, tt := range tests {
        ip := net.ParseIP(tt.IP)
        hi := binary.BigEndian.Uint64(ip[:8])
        lo := binary.BigEndian.Uint64(ip[8:])

        ok, r := tree.MostSpecificContainsIpV6(hi, lo)
        assert.Equal(t, tt.Found, ok)
        assert.Equal(t, tt.Result, r)
    }
}
@@ -180,15 +180,9 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
 		if err != nil {
 			return fmt.Errorf("error while generating ecdsa keys: %s", err)
 		}
-		// ecdh.PrivateKey lets us get at the encoded bytes, even though
-		// we aren't using ECDH here.
-		eKey, err := key.ECDH()
-		if err != nil {
-			return fmt.Errorf("error while converting ecdsa key: %s", err)
-		}
-		rawPriv = eKey.Bytes()
-		pub = eKey.PublicKey().Bytes()
+		// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
+		rawPriv = key.D.FillBytes(make([]byte, 32))
+		pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)
 	}
 
 	nc := cert.NebulaCertificate{
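The left-hand side leans on the crypto/ecdsa to crypto/ecdh bridge that exists since Go 1.20. A standalone sketch of just that conversion; the key generation and printing here are illustrative.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"
)

func main() {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }

    // Go 1.20+: convert to a crypto/ecdh key to get at the encoded bytes.
    eKey, err := key.ECDH()
    if err != nil {
        panic(err)
    }
    rawPriv := eKey.Bytes()          // 32-byte scalar for P-256
    pub := eKey.PublicKey().Bytes()  // 65-byte uncompressed point

    fmt.Println(len(rawPriv), len(pub)) // 32 65
}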
@@ -3,18 +3,15 @@ package nebula
 import (
 	"bytes"
 	"context"
-	"encoding/binary"
-	"fmt"
-	"net/netip"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/cert"
-	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
+	"github.com/slackhq/nebula/iputil"
+	"github.com/slackhq/nebula/udp"
 )
 
 type trafficDecision int
@@ -30,124 +27,130 @@ const (
 )
 
 type connectionManager struct {
+	in     map[uint32]struct{}
+	inLock *sync.RWMutex
+
+	out     map[uint32]struct{}
+	outLock *sync.RWMutex
+
 	// relayUsed holds which relay localIndexs are in use
 	relayUsed     map[uint32]struct{}
 	relayUsedLock *sync.RWMutex
 
 	hostMap      *HostMap
 	trafficTimer *LockingTimerWheel[uint32]
 	intf         *Interface
-	punchy       *Punchy
-
-	// Configuration settings
+	pendingDeletion map[uint32]struct{}
+	punchy          *Punchy
 	checkInterval           time.Duration
 	pendingDeletionInterval time.Duration
-	inactivityTimeout       atomic.Int64
-	dropInactive            atomic.Bool
-
-	metricsTxPunchy metrics.Counter
+	metricsTxPunchy         metrics.Counter
 
 	l *logrus.Logger
 }
 
-func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
-	cm := &connectionManager{
-		hostMap:         hm,
-		l:               l,
-		punchy:          p,
-		relayUsed:       make(map[uint32]struct{}),
-		relayUsedLock:   &sync.RWMutex{},
-		metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
-	}
-
-	cm.reload(c, true)
-	c.RegisterReloadCallback(func(c *config.C) {
-		cm.reload(c, false)
-	})
-
-	return cm
-}
-
-func (cm *connectionManager) reload(c *config.C, initial bool) {
-	if initial {
-		cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
-		cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
-
-		// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
-		// pretty close to their configured duration.
-		// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
-		minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
-		maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
-		cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
-	}
-
-	if initial || c.HasChanged("tunnels.inactivity_timeout") {
-		old := cm.getInactivityTimeout()
-		cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
-		if !initial {
-			cm.l.WithField("oldDuration", old).
-				WithField("newDuration", cm.getInactivityTimeout()).
-				Info("Inactivity timeout has changed")
-		}
-	}
-
-	if initial || c.HasChanged("tunnels.drop_inactive") {
-		old := cm.dropInactive.Load()
-		cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
-		if !initial {
-			cm.l.WithField("oldBool", old).
-				WithField("newBool", cm.dropInactive.Load()).
-				Info("Drop inactive setting has changed")
-		}
-	}
-}
-
-func (cm *connectionManager) getInactivityTimeout() time.Duration {
-	return (time.Duration)(cm.inactivityTimeout.Load())
-}
-
-func (cm *connectionManager) In(h *HostInfo) {
-	h.in.Store(true)
-}
-
-func (cm *connectionManager) Out(h *HostInfo) {
-	h.out.Store(true)
-}
-
-func (cm *connectionManager) RelayUsed(localIndex uint32) {
-	cm.relayUsedLock.RLock()
+func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
+	var max time.Duration
+	if checkInterval < pendingDeletionInterval {
+		max = pendingDeletionInterval
+	} else {
+		max = checkInterval
+	}
+
+	nc := &connectionManager{
+		hostMap:                 intf.hostMap,
+		in:                      make(map[uint32]struct{}),
+		inLock:                  &sync.RWMutex{},
+		out:                     make(map[uint32]struct{}),
+		outLock:                 &sync.RWMutex{},
+		relayUsed:               make(map[uint32]struct{}),
+		relayUsedLock:           &sync.RWMutex{},
+		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
+		intf:                    intf,
+		pendingDeletion:         make(map[uint32]struct{}),
+		checkInterval:           checkInterval,
+		pendingDeletionInterval: pendingDeletionInterval,
+		punchy:                  punchy,
+		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
+		l:                       l,
+	}
+
+	nc.Start(ctx)
+	return nc
+}
+
+func (n *connectionManager) In(localIndex uint32) {
+	n.inLock.RLock()
 	// If this already exists, return
-	if _, ok := cm.relayUsed[localIndex]; ok {
-		cm.relayUsedLock.RUnlock()
+	if _, ok := n.in[localIndex]; ok {
+		n.inLock.RUnlock()
 		return
 	}
-	cm.relayUsedLock.RUnlock()
-	cm.relayUsedLock.Lock()
-	cm.relayUsed[localIndex] = struct{}{}
-	cm.relayUsedLock.Unlock()
+	n.inLock.RUnlock()
+	n.inLock.Lock()
+	n.in[localIndex] = struct{}{}
+	n.inLock.Unlock()
+}
+
+func (n *connectionManager) Out(localIndex uint32) {
+	n.outLock.RLock()
+	// If this already exists, return
+	if _, ok := n.out[localIndex]; ok {
+		n.outLock.RUnlock()
+		return
+	}
+	n.outLock.RUnlock()
+	n.outLock.Lock()
+	n.out[localIndex] = struct{}{}
+	n.outLock.Unlock()
+}
+
+func (n *connectionManager) RelayUsed(localIndex uint32) {
+	n.relayUsedLock.RLock()
+	// If this already exists, return
+	if _, ok := n.relayUsed[localIndex]; ok {
+		n.relayUsedLock.RUnlock()
+		return
+	}
+	n.relayUsedLock.RUnlock()
+	n.relayUsedLock.Lock()
+	n.relayUsed[localIndex] = struct{}{}
+	n.relayUsedLock.Unlock()
 }
 
 // getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
 // resets the state for this local index
-func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
-	in := h.in.Swap(false)
-	out := h.out.Swap(false)
-	if in || out {
-		h.lastUsed = now
-	}
+func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
+	n.inLock.Lock()
+	n.outLock.Lock()
+	_, in := n.in[localIndex]
+	_, out := n.out[localIndex]
+	delete(n.in, localIndex)
+	delete(n.out, localIndex)
+	n.inLock.Unlock()
+	n.outLock.Unlock()
 	return in, out
 }
 
-// AddTrafficWatch must be called for every new HostInfo.
-// We will continue to monitor the HostInfo until the tunnel is dropped.
-func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
-	if h.out.Swap(true) == false {
-		cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
+func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
+	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
+	n.outLock.Lock()
+	if _, ok := n.out[localIndex]; ok {
+		n.outLock.Unlock()
+		return
 	}
+	n.out[localIndex] = struct{}{}
+	n.trafficTimer.Add(localIndex, n.checkInterval)
+	n.outLock.Unlock()
 }
 
-func (cm *connectionManager) Start(ctx context.Context) {
-	clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
+func (n *connectionManager) Start(ctx context.Context) {
+	go n.Run(ctx)
+}
+
+func (n *connectionManager) Run(ctx context.Context) {
+	//TODO: this tick should be based on the min wheel tick? Check firewall
+	clockSource := time.NewTicker(500 * time.Millisecond)
 	defer clockSource.Stop()
 
 	p := []byte("")
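The left-hand connectionManager keeps its reloadable inactivity timeout in an atomic.Int64 instead of behind a mutex. A minimal sketch of that pattern in isolation; the type and field names here are illustrative, not the nebula types.

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// A reload callback can store a new duration while the traffic-check
// goroutine reads it lock-free on every tick.
type settings struct {
    inactivityTimeout atomic.Int64
}

func (s *settings) set(d time.Duration) { s.inactivityTimeout.Store(int64(d)) }
func (s *settings) get() time.Duration  { return time.Duration(s.inactivityTimeout.Load()) }

func main() {
    var s settings
    s.set(10 * time.Minute)
    fmt.Println(s.get()) // 10m0s
}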
@@ -160,137 +163,128 @@ func (cm *connectionManager) Start(ctx context.Context) {
 			return
 
 		case now := <-clockSource.C:
-			cm.trafficTimer.Advance(now)
+			n.trafficTimer.Advance(now)
 			for {
-				localIndex, has := cm.trafficTimer.Purge()
+				localIndex, has := n.trafficTimer.Purge()
 				if !has {
 					break
 				}
 
-				cm.doTrafficCheck(localIndex, p, nb, out, now)
+				n.doTrafficCheck(localIndex, p, nb, out, now)
 			}
 		}
 	}
 }
 
-func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
-	decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)
+func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
+	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
 
 	switch decision {
 	case deleteTunnel:
-		if cm.hostMap.DeleteHostInfo(hostinfo) {
+		if n.hostMap.DeleteHostInfo(hostinfo) {
 			// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
-			cm.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
+			n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
 		}
 
 	case closeTunnel:
-		cm.intf.sendCloseTunnel(hostinfo)
-		cm.intf.closeTunnel(hostinfo)
+		n.intf.sendCloseTunnel(hostinfo)
+		n.intf.closeTunnel(hostinfo)
 
 	case swapPrimary:
-		cm.swapPrimary(hostinfo, primary)
+		n.swapPrimary(hostinfo, primary)
 
 	case migrateRelays:
-		cm.migrateRelayUsed(hostinfo, primary)
+		n.migrateRelayUsed(hostinfo, primary)
 
 	case tryRehandshake:
-		cm.tryRehandshake(hostinfo)
+		n.tryRehandshake(hostinfo)
 
 	case sendTestPacket:
-		cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
+		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
 	}
 
-	cm.resetRelayTrafficCheck(hostinfo)
+	n.resetRelayTrafficCheck(hostinfo)
 }
 
-func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
+func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
 	if hostinfo != nil {
-		cm.relayUsedLock.Lock()
-		defer cm.relayUsedLock.Unlock()
+		n.relayUsedLock.Lock()
+		defer n.relayUsedLock.Unlock()
 		// No need to migrate any relays, delete usage info now.
 		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
-			delete(cm.relayUsed, idx)
+			delete(n.relayUsed, idx)
 		}
 	}
 }
 
-func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
+func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
 	relayFor := oldhostinfo.relayState.CopyAllRelayFor()
 
 	for _, r := range relayFor {
 		existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
 
 		var index uint32
-		var relayFrom netip.Addr
-		var relayTo netip.Addr
+		var relayFrom iputil.VpnIp
+		var relayTo iputil.VpnIp
 		switch {
-		case ok:
-			switch existing.State {
-			case Established, PeerRequested, Disestablished:
-				// This relay already exists in newhostinfo, then do nothing.
-				continue
-			case Requested:
-				// The relayed connection exists in a Requested state; re-send the request
-				index = existing.LocalIndex
-				switch r.Type {
-				case TerminalType:
-					relayFrom = cm.intf.myVpnNet.Addr()
-					relayTo = existing.PeerIp
-				case ForwardingType:
-					relayFrom = existing.PeerIp
-					relayTo = newhostinfo.vpnIp
-				default:
-					// should never happen
-					panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
-				}
+		case ok && existing.State == Established:
+			// This relay already exists in newhostinfo, then do nothing.
+			continue
+		case ok && existing.State == Requested:
+			// The relay exists in a Requested state; re-send the request
+			index = existing.LocalIndex
+			switch r.Type {
+			case TerminalType:
+				relayFrom = n.intf.myVpnIp
+				relayTo = existing.PeerIp
+			case ForwardingType:
+				relayFrom = existing.PeerIp
+				relayTo = newhostinfo.vpnIp
+			default:
+				// should never happen
 			}
 		case !ok:
-			cm.relayUsedLock.RLock()
-			if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
+			n.relayUsedLock.RLock()
+			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
 				// The relay hasn't been used; don't migrate it.
-				cm.relayUsedLock.RUnlock()
+				n.relayUsedLock.RUnlock()
 				continue
 			}
-			cm.relayUsedLock.RUnlock()
+			n.relayUsedLock.RUnlock()
 			// The relay doesn't exist at all; create some relay state and send the request.
 			var err error
-			index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerIp, nil, r.Type, Requested)
+			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
 			if err != nil {
-				cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
+				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
 				continue
 			}
 			switch r.Type {
 			case TerminalType:
-				relayFrom = cm.intf.myVpnNet.Addr()
+				relayFrom = n.intf.myVpnIp
 				relayTo = r.PeerIp
 			case ForwardingType:
 				relayFrom = r.PeerIp
 				relayTo = newhostinfo.vpnIp
 			default:
 				// should never happen
-				panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
 			}
 		}
 
-		//TODO: IPV6-WORK
-		relayFromB := relayFrom.As4()
-		relayToB := relayTo.As4()
-
 		// Send a CreateRelayRequest to the peer.
 		req := NebulaControl{
 			Type:                NebulaControl_CreateRelayRequest,
 			InitiatorRelayIndex: index,
-			RelayFromIp:         binary.BigEndian.Uint32(relayFromB[:]),
-			RelayToIp:           binary.BigEndian.Uint32(relayToB[:]),
+			RelayFromIp:         uint32(relayFrom),
+			RelayToIp:           uint32(relayTo),
 		}
 		msg, err := req.Marshal()
 		if err != nil {
-			cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
+			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
 		} else {
-			cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
-			cm.l.WithFields(logrus.Fields{
-				"relayFrom":           req.RelayFromIp,
-				"relayTo":             req.RelayToIp,
+			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
+			n.l.WithFields(logrus.Fields{
+				"relayFrom":           iputil.VpnIp(req.RelayFromIp),
+				"relayTo":             iputil.VpnIp(req.RelayToIp),
 				"initiatorRelayIndex": req.InitiatorRelayIndex,
 				"responderRelayIndex": req.ResponderRelayIndex,
 				"vpnIp":               newhostinfo.vpnIp}).
@@ -299,45 +293,46 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
 	}
 }
 
-func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
-	// Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
-	cm.hostMap.RLock()
-	defer cm.hostMap.RUnlock()
+func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
+	n.hostMap.RLock()
+	defer n.hostMap.RUnlock()
 
-	hostinfo := cm.hostMap.Indexes[localIndex]
+	hostinfo := n.hostMap.Indexes[localIndex]
 	if hostinfo == nil {
-		cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
+		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
+		delete(n.pendingDeletion, localIndex)
 		return doNothing, nil, nil
 	}
 
-	if cm.isInvalidCertificate(now, hostinfo) {
+	if n.isInvalidCertificate(now, hostinfo) {
+		delete(n.pendingDeletion, hostinfo.localIndexId)
 		return closeTunnel, hostinfo, nil
 	}
 
-	primary := cm.hostMap.Hosts[hostinfo.vpnIp]
+	primary := n.hostMap.Hosts[hostinfo.vpnIp]
 	mainHostInfo := true
 	if primary != nil && primary != hostinfo {
 		mainHostInfo = false
 	}
 
 	// Check for traffic on this hostinfo
-	inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)
+	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
 
 	// A hostinfo is determined alive if there is incoming traffic
 	if inTraffic {
 		decision := doNothing
-		if cm.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(cm.l).
+		if n.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(n.l).
 				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
 				Debug("Tunnel status")
 		}
-		hostinfo.pendingDeletion.Store(false)
+		delete(n.pendingDeletion, hostinfo.localIndexId)
 
 		if mainHostInfo {
 			decision = tryRehandshake
 
 		} else {
-			if cm.shouldSwapPrimary(hostinfo, primary) {
+			if n.shouldSwapPrimary(hostinfo, primary) {
 				decision = swapPrimary
 			} else {
 				// migrate the relays to the primary, if in use.
@@ -345,55 +340,46 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
 			}
 		}
 
-		cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
+		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
 
 		if !outTraffic {
 			// Send a punch packet to keep the NAT state alive
-			cm.sendPunch(hostinfo)
+			n.sendPunch(hostinfo)
 		}
 
 		return decision, hostinfo, primary
 	}
 
-	if hostinfo.pendingDeletion.Load() {
+	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
 		// We have already sent a test packet and nothing was returned, this hostinfo is dead
-		hostinfo.logger(cm.l).
+		hostinfo.logger(n.l).
 			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
 			Info("Tunnel status")
 
+		delete(n.pendingDeletion, hostinfo.localIndexId)
 		return deleteTunnel, hostinfo, nil
 	}
 
 	decision := doNothing
 	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
 		if !outTraffic {
-			inactiveFor, isInactive := cm.isInactive(hostinfo, now)
-			if isInactive {
-				// Tunnel is inactive, tear it down
-				hostinfo.logger(cm.l).
-					WithField("inactiveDuration", inactiveFor).
-					WithField("primary", mainHostInfo).
-					Info("Dropping tunnel due to inactivity")
-
-				return closeTunnel, hostinfo, primary
-			}
-
 			// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
 			// Just maintain NAT state if configured to do so.
-			cm.sendPunch(hostinfo)
-			cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
+			n.sendPunch(hostinfo)
+			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
 			return doNothing, nil, nil
 
 		}
 
-		if cm.punchy.GetTargetEverything() {
+		if n.punchy.GetTargetEverything() {
 			// This is similar to the old punchy behavior with a slight optimization.
 			// We aren't receiving traffic but we are sending it, punch on all known
 			// ips in case we need to re-prime NAT state
-			cm.sendPunch(hostinfo)
+			n.sendPunch(hostinfo)
 		}
 
-		if cm.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(cm.l).
+		if n.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(n.l).
 				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
 				Debug("Tunnel status")
 		}
@@ -402,118 +388,95 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Tim
decision = sendTestPacket

} else {
-if cm.l.Level >= logrus.DebugLevel {
+if n.l.Level >= logrus.DebugLevel {
-hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
+hostinfo.logger(n.l).Debugf("Hostinfo sadness")
}
}

-hostinfo.pendingDeletion.Store(true)
+n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
-cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
+n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
return decision, hostinfo, nil
}

-func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
+func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
-if cm.dropInactive.Load() == false {
-// We aren't configured to drop inactive tunnels
-return 0, false
-}

-inactiveDuration := now.Sub(hostinfo.lastUsed)
-if inactiveDuration < cm.getInactivityTimeout() {
-// It's not considered inactive
-return inactiveDuration, false
-}

-// The tunnel is inactive
-return inactiveDuration, true
-}

-func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
// Let's sort this out.

-if current.vpnIp.Compare(cm.intf.myVpnNet.Addr()) < 0 {
+if current.vpnIp < n.intf.myVpnIp {
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
// The remotes vpn ip is lower than mine. I will not flip.
return false
}

-certState := cm.intf.pki.GetCertState()
+certState := n.intf.pki.GetCertState()
return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
}

-func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
+func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
-cm.hostMap.Lock()
+n.hostMap.Lock()
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
-if cm.hostMap.Hosts[current.vpnIp] == primary {
+if n.hostMap.Hosts[current.vpnIp] == primary {
-cm.hostMap.unlockedMakePrimary(current)
+n.hostMap.unlockedMakePrimary(current)
}
-cm.hostMap.Unlock()
+n.hostMap.Unlock()
}

// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
// check and return true.
-func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
+func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
remoteCert := hostinfo.GetCert()
if remoteCert == nil {
return false
}

-valid, err := remoteCert.VerifyWithCache(now, cm.intf.pki.GetCAPool())
+valid, err := remoteCert.VerifyWithCache(now, n.intf.pki.GetCAPool())
if valid {
return false
}

-if !cm.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
+if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
// Block listed certificates should always be disconnected
return false
}

fingerprint, _ := remoteCert.Sha256Sum()
-hostinfo.logger(cm.l).WithError(err).
+hostinfo.logger(n.l).WithError(err).
WithField("fingerprint", fingerprint).
Info("Remote certificate is no longer valid, tearing down the tunnel")

return true
}

-func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
+func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
-if !cm.punchy.GetPunch() {
+if !n.punchy.GetPunch() {
// Punching is disabled
return
}

-if cm.intf.lightHouse.IsLighthouseIP(hostinfo.vpnIp) {
+if n.punchy.GetTargetEverything() {
-// Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
+hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
-// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
+n.metricsTxPunchy.Inc(1)
-// would lose the ability to notify us and punchy.respond would become unreliable.
+n.intf.outside.WriteTo([]byte{1}, addr)
-return
-}

-if cm.punchy.GetTargetEverything() {
-hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
-cm.metricsTxPunchy.Inc(1)
-cm.intf.outside.WriteTo([]byte{1}, addr)
})

-} else if hostinfo.remote.IsValid() {
+} else if hostinfo.remote != nil {
-cm.metricsTxPunchy.Inc(1)
+n.metricsTxPunchy.Inc(1)
-cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
+n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
}
}

-func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
+func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
-certState := cm.intf.pki.GetCertState()
+certState := n.intf.pki.GetCertState()
if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
return
}

-cm.l.WithField("vpnIp", hostinfo.vpnIp).
+n.l.WithField("vpnIp", hostinfo.vpnIp).
WithField("reason", "local certificate is not current").
Info("Re-handshaking with remote")

-cm.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
+n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
}
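Both sides of this hunk implement the same idea: keep NAT mappings alive by firing a tiny UDP payload at every address known for the peer. Below is a minimal, self-contained sketch of that mechanic in plain Go; the function name, the hard-coded addresses, and the throwaway 1-byte payload mirror the diff but are otherwise hypothetical, not Nebula's real API.

```go
package main

import (
	"fmt"
	"net"
)

// punchAll sends a single throwaway byte to every candidate address so that
// stateful NAT devices on the path keep (or re-create) a mapping for us.
// The addresses here are placeholders; in Nebula they come from the host's
// RemoteList.
func punchAll(conn *net.UDPConn, remotes []*net.UDPAddr) {
	for _, addr := range remotes {
		// The payload content is irrelevant; one byte is enough to prime NAT state.
		if _, err := conn.WriteToUDP([]byte{1}, addr); err != nil {
			fmt.Println("punch failed:", addr, err)
		}
	}
}

func main() {
	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 0})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	remotes := []*net.UDPAddr{
		{IP: net.ParseIP("192.0.2.10"), Port: 4242},
		{IP: net.ParseIP("198.51.100.7"), Port: 4242},
	}
	punchAll(conn, remotes)
}
```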
@@ -1,29 +1,32 @@
package nebula

import (
+"context"
"crypto/ed25519"
"crypto/rand"
"net"
-"net/netip"
"testing"
"time"

"github.com/flynn/noise"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
+"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
)

+var vpnIp iputil.VpnIp

func newTestLighthouse() *LightHouse {
lh := &LightHouse{
l: test.NewLogger(),
-addrMap: map[netip.Addr]*RemoteList{},
+addrMap: map[iputil.VpnIp]*RemoteList{},
-queryChan: make(chan netip.Addr, 10),
+queryChan: make(chan iputil.VpnIp, 10),
}
-lighthouses := map[netip.Addr]struct{}{}
+lighthouses := map[iputil.VpnIp]struct{}{}
-staticList := map[netip.Addr]struct{}{}
+staticList := map[iputil.VpnIp]struct{}{}

lh.lighthouses.Store(&lighthouses)
lh.staticList.Store(&staticList)
@@ -34,15 +37,13 @@ func newTestLighthouse() *LightHouse {
func Test_NewConnectionManagerTest(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
-vpncidr := netip.MustParsePrefix("172.1.1.1/24")
+_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-localrange := netip.MustParsePrefix("10.1.1.1/24")
+_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-vpnIp := netip.MustParseAddr("172.1.1.2")
+vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
-preferredRanges := []netip.Prefix{localrange}
+preferredRanges := []*net.IPNet{localrange}

// Very incomplete mock objects
-hostMap := newHostMap(l, vpncidr)
+hostMap := NewHostMap(l, vpncidr, preferredRanges)
-hostMap.preferredRanges.Store(&preferredRanges)

cs := &CertState{
RawCertificate: []byte{},
PrivateKey: []byte{},
@@ -64,10 +65,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
ifce.pki.cs.Store(cs)

// Create manager
-conf := config.NewC(l)
+ctx, cancel := context.WithCancel(context.Background())
-punchy := NewPunchyFromConfig(l, conf)
+defer cancel()
-nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+punchy := NewPunchyFromConfig(l, config.NewC(l))
-nc.intf = ifce
+nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
@@ -85,32 +86,31 @@ func Test_NewConnectionManagerTest(t *testing.T) {
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

// We saw traffic out to vpnIp
-nc.Out(hostinfo)
+nc.Out(hostinfo.localIndexId)
-nc.In(hostinfo)
+nc.In(hostinfo.localIndexId)
-assert.False(t, hostinfo.pendingDeletion.Load())
+assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
-assert.True(t, hostinfo.out.Load())
+assert.Contains(t, nc.out, hostinfo.localIndexId)
-assert.True(t, hostinfo.in.Load())

// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-assert.False(t, hostinfo.pendingDeletion.Load())
+assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-assert.False(t, hostinfo.out.Load())
+assert.NotContains(t, nc.out, hostinfo.localIndexId)
-assert.False(t, hostinfo.in.Load())
+assert.NotContains(t, nc.in, hostinfo.localIndexId)

// Do another traffic check tick, this host should be pending deletion now
-nc.Out(hostinfo)
+nc.Out(hostinfo.localIndexId)
-assert.True(t, hostinfo.out.Load())
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-assert.True(t, hostinfo.pendingDeletion.Load())
+assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
-assert.False(t, hostinfo.out.Load())
+assert.NotContains(t, nc.out, hostinfo.localIndexId)
-assert.False(t, hostinfo.in.Load())
+assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

// Do a final traffic check tick, the host should now be removed
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
+assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
}
@@ -118,15 +118,12 @@ func Test_NewConnectionManagerTest(t *testing.T) {
func Test_NewConnectionManagerTest2(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
-vpncidr := netip.MustParsePrefix("172.1.1.1/24")
+_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-localrange := netip.MustParsePrefix("10.1.1.1/24")
+_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-vpnIp := netip.MustParseAddr("172.1.1.2")
+preferredRanges := []*net.IPNet{localrange}
-preferredRanges := []netip.Prefix{localrange}

// Very incomplete mock objects
-hostMap := newHostMap(l, vpncidr)
+hostMap := NewHostMap(l, vpncidr, preferredRanges)
-hostMap.preferredRanges.Store(&preferredRanges)

cs := &CertState{
RawCertificate: []byte{},
PrivateKey: []byte{},
@@ -148,10 +145,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
ifce.pki.cs.Store(cs)

// Create manager
-conf := config.NewC(l)
+ctx, cancel := context.WithCancel(context.Background())
-punchy := NewPunchyFromConfig(l, conf)
+defer cancel()
-nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+punchy := NewPunchyFromConfig(l, config.NewC(l))
-nc.intf = ifce
+nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
@@ -169,130 +166,33 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

// We saw traffic out to vpnIp
-nc.Out(hostinfo)
+nc.Out(hostinfo.localIndexId)
-nc.In(hostinfo)
+nc.In(hostinfo.localIndexId)
-assert.True(t, hostinfo.in.Load())
+assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
-assert.True(t, hostinfo.out.Load())
-assert.False(t, hostinfo.pendingDeletion.Load())
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-assert.False(t, hostinfo.pendingDeletion.Load())
+assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-assert.False(t, hostinfo.out.Load())
+assert.NotContains(t, nc.out, hostinfo.localIndexId)
-assert.False(t, hostinfo.in.Load())
+assert.NotContains(t, nc.in, hostinfo.localIndexId)

// Do another traffic check tick, this host should be pending deletion now
-nc.Out(hostinfo)
+nc.Out(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-assert.True(t, hostinfo.pendingDeletion.Load())
+assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
-assert.False(t, hostinfo.out.Load())
+assert.NotContains(t, nc.out, hostinfo.localIndexId)
-assert.False(t, hostinfo.in.Load())
+assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

// We saw traffic, should no longer be pending deletion
-nc.In(hostinfo)
+nc.In(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-assert.False(t, hostinfo.pendingDeletion.Load())
+assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-assert.False(t, hostinfo.out.Load())
+assert.NotContains(t, nc.out, hostinfo.localIndexId)
-assert.False(t, hostinfo.in.Load())
+assert.NotContains(t, nc.in, hostinfo.localIndexId)
-assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
-assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
-}

-func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
-l := test.NewLogger()
-vpncidr := netip.MustParsePrefix("172.1.1.1/24")
-localrange := netip.MustParsePrefix("10.1.1.1/24")
-vpnIp := netip.MustParseAddr("172.1.1.2")
-preferredRanges := []netip.Prefix{localrange}

-// Very incomplete mock objects
-hostMap := newHostMap(l, vpncidr)
-hostMap.preferredRanges.Store(&preferredRanges)

-cs := &CertState{
-RawCertificate: []byte{},
-PrivateKey: []byte{},
-Certificate: &cert.NebulaCertificate{},
-RawCertificateNoKey: []byte{},
-}

-lh := newTestLighthouse()
-ifce := &Interface{
-hostMap: hostMap,
-inside: &test.NoopTun{},
-outside: &udp.NoopConn{},
-firewall: &Firewall{},
-lightHouse: lh,
-pki: &PKI{},
-handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
-l: l,
-}
-ifce.pki.cs.Store(cs)

-// Create manager
-conf := config.NewC(l)
-conf.Settings["tunnels"] = map[interface{}]interface{}{
-"drop_inactive": true,
-}
-punchy := NewPunchyFromConfig(l, conf)
-nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
-assert.True(t, nc.dropInactive.Load())
-nc.intf = ifce

-// Add an ip we have established a connection w/ to hostmap
-hostinfo := &HostInfo{
-vpnIp: vpnIp,
-localIndexId: 1099,
-remoteIndexId: 9901,
-}
-hostinfo.ConnectionState = &ConnectionState{
-myCert: &cert.NebulaCertificate{},
-H: &noise.HandshakeState{},
-}
-nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

-// Do a traffic check tick, in and out should be cleared but should not be pending deletion
-nc.Out(hostinfo)
-nc.In(hostinfo)
-assert.True(t, hostinfo.out.Load())
-assert.True(t, hostinfo.in.Load())

-now := time.Now()
-decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
-assert.Equal(t, tryRehandshake, decision)
-assert.Equal(t, now, hostinfo.lastUsed)
-assert.False(t, hostinfo.pendingDeletion.Load())
-assert.False(t, hostinfo.out.Load())
-assert.False(t, hostinfo.in.Load())

-decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
-assert.Equal(t, doNothing, decision)
-assert.Equal(t, now, hostinfo.lastUsed)
-assert.False(t, hostinfo.pendingDeletion.Load())
-assert.False(t, hostinfo.out.Load())
-assert.False(t, hostinfo.in.Load())

-// Do another traffic check tick, should still not be pending deletion
-decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
-assert.Equal(t, doNothing, decision)
-assert.Equal(t, now, hostinfo.lastUsed)
-assert.False(t, hostinfo.pendingDeletion.Load())
-assert.False(t, hostinfo.out.Load())
-assert.False(t, hostinfo.in.Load())
-assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
-assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

-// Finally advance beyond the inactivity timeout
-decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
-assert.Equal(t, closeTunnel, decision)
-assert.Equal(t, now, hostinfo.lastUsed)
-assert.False(t, hostinfo.pendingDeletion.Load())
-assert.False(t, hostinfo.out.Load())
-assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}
@@ -307,12 +207,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
IP: net.IPv4(172, 1, 1, 2),
Mask: net.IPMask{255, 255, 255, 0},
}
-vpncidr := netip.MustParsePrefix("172.1.1.1/24")
+_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-localrange := netip.MustParsePrefix("10.1.1.1/24")
+_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-vpnIp := netip.MustParseAddr("172.1.1.2")
+preferredRanges := []*net.IPNet{localrange}
-preferredRanges := []netip.Prefix{localrange}
+hostMap := NewHostMap(l, vpncidr, preferredRanges)
-hostMap := newHostMap(l, vpncidr)
-hostMap.preferredRanges.Store(&preferredRanges)

// Generate keys for CA and peer's cert.
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
@@ -370,10 +268,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
ifce.disconnectInvalid.Store(true)

// Create manager
-conf := config.NewC(l)
+ctx, cancel := context.WithCancel(context.Background())
-punchy := NewPunchyFromConfig(l, conf)
+defer cancel()
-nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+punchy := NewPunchyFromConfig(l, config.NewC(l))
-nc.intf = ifce
+nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
ifce.connectionManager = nc

hostinfo := &HostInfo{
@@ -72,8 +72,6 @@ func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, i
window: b,
myCert: certState.Certificate,
}
-// always start the counter from 2, as packet 1 and packet 2 are handshake packets.
-ci.messageCounter.Add(2)

return ci
}
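The removed lines above pre-advance the connection's message counter because counters 1 and 2 are consumed by the handshake packets themselves, so data packets start at 3. A minimal sketch of that bookkeeping using the standard library's atomic counter; only the field name is taken from the diff, the rest is illustrative:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type connState struct {
	// messageCounter mirrors the field referenced in the diff; packets 1 and 2
	// are handshake packets, so data packets start counting from 3.
	messageCounter atomic.Uint64
}

func newConnState() *connState {
	cs := &connState{}
	cs.messageCounter.Add(2) // reserve counters 1 and 2 for the handshake
	return cs
}

func main() {
	cs := newConnState()
	// The first data packet gets counter 3.
	fmt.Println("first data packet counter:", cs.messageCounter.Add(1))
}
```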
100 control.go
@@ -2,7 +2,7 @@ package nebula

import (
"context"
-"net/netip"
+"net"
"os"
"os/signal"
"syscall"
@@ -10,7 +10,9 @@ import (
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay"
+"github.com/slackhq/nebula/udp"
)

// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
@@ -19,34 +21,33 @@ import (
type controlEach func(h *HostInfo)

type controlHostLister interface {
-QueryVpnIp(vpnIp netip.Addr) *HostInfo
+QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo
ForEachIndex(each controlEach)
ForEachVpnIp(each controlEach)
-GetPreferredRanges() []netip.Prefix
+GetPreferredRanges() []*net.IPNet
}

type Control struct {
f *Interface
l *logrus.Logger
ctx context.Context
cancel context.CancelFunc
sshStart func()
statsStart func()
dnsStart func()
lighthouseStart func()
-connectionManagerStart func(context.Context)
}

type ControlHostInfo struct {
-VpnIp netip.Addr `json:"vpnIp"`
+VpnIp net.IP `json:"vpnIp"`
LocalIndex uint32 `json:"localIndex"`
RemoteIndex uint32 `json:"remoteIndex"`
-RemoteAddrs []netip.AddrPort `json:"remoteAddrs"`
+RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
Cert *cert.NebulaCertificate `json:"cert"`
MessageCounter uint64 `json:"messageCounter"`
-CurrentRemote netip.AddrPort `json:"currentRemote"`
+CurrentRemote *udp.Addr `json:"currentRemote"`
-CurrentRelaysToMe []netip.Addr `json:"currentRelaysToMe"`
+CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"`
-CurrentRelaysThroughMe []netip.Addr `json:"currentRelaysThroughMe"`
+CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"`
}

// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
@@ -64,9 +65,6 @@ func (c *Control) Start() {
if c.dnsStart != nil {
go c.dnsStart()
}
-if c.connectionManagerStart != nil {
-go c.connectionManagerStart(c.ctx)
-}
if c.lighthouseStart != nil {
c.lighthouseStart()
}
@@ -133,45 +131,8 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
}
}

-// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
-func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) *cert.NebulaCertificate {
-if c.f.myVpnNet.Addr() == vpnIp {
-return c.f.pki.GetCertState().Certificate
-}
-hi := c.f.hostMap.QueryVpnIp(vpnIp)
-if hi == nil {
-return nil
-}
-return hi.GetCert()
-}

-// CreateTunnel creates a new tunnel to the given vpn ip.
-func (c *Control) CreateTunnel(vpnIp netip.Addr) {
-c.f.handshakeManager.StartHandshake(vpnIp, nil)
-}

-// PrintTunnel creates a new tunnel to the given vpn ip.
-func (c *Control) PrintTunnel(vpnIp netip.Addr) *ControlHostInfo {
-hi := c.f.hostMap.QueryVpnIp(vpnIp)
-if hi == nil {
-return nil
-}
-chi := copyHostInfo(hi, c.f.hostMap.GetPreferredRanges())
-return &chi
-}

-// QueryLighthouse queries the lighthouse.
-func (c *Control) QueryLighthouse(vpnIp netip.Addr) *CacheMap {
-hi := c.f.lightHouse.Query(vpnIp)
-if hi == nil {
-return nil
-}
-return hi.CopyCache()
-}

// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
-// Caller should take care to Unmap() any 4in6 addresses prior to calling.
+func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
-func (c *Control) GetHostInfoByVpnIp(vpnIp netip.Addr, pending bool) *ControlHostInfo {
var hl controlHostLister
if pending {
hl = c.f.handshakeManager
@@ -184,26 +145,24 @@ func (c *Control) GetHostInfoByVpnIp(vpnIp netip.Addr, pending bool) *ControlHos
return nil
}

-ch := copyHostInfo(h, c.f.hostMap.GetPreferredRanges())
+ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
return &ch
}

// SetRemoteForTunnel forces a tunnel to use a specific remote
-// Caller should take care to Unmap() any 4in6 addresses prior to calling.
+func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
-func (c *Control) SetRemoteForTunnel(vpnIp netip.Addr, addr netip.AddrPort) *ControlHostInfo {
hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return nil
}

-hostInfo.SetRemote(addr)
+hostInfo.SetRemote(addr.Copy())
-ch := copyHostInfo(hostInfo, c.f.hostMap.GetPreferredRanges())
+ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
return &ch
}

// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
-// Caller should take care to Unmap() any 4in6 addresses prior to calling.
+func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
-func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return false
@@ -246,7 +205,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
}

// Learn which hosts are being used as relays, so we can shut them down last.
-relayingHosts := map[netip.Addr]*HostInfo{}
+relayingHosts := map[iputil.VpnIp]*HostInfo{}
// Grab the hostMap lock to access the Relays map
c.f.hostMap.Lock()
for _, relayingHost := range c.f.hostMap.Relays {
@@ -277,16 +236,15 @@ func (c *Control) Device() overlay.Device {
return c.f.inside
}

-func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
+func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {

chi := ControlHostInfo{
-VpnIp: h.vpnIp,
+VpnIp: h.vpnIp.ToIP(),
LocalIndex: h.localIndexId,
RemoteIndex: h.remoteIndexId,
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
CurrentRelaysToMe: h.relayState.CopyRelayIps(),
CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
-CurrentRemote: h.remote,
}

if h.ConnectionState != nil {
@@ -297,6 +255,10 @@ func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
chi.Cert = c.Copy()
}

+if h.remote != nil {
+chi.CurrentRemote = h.remote.Copy()
+}

return chi
}

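The `copyHostInfo` change above follows the file's rule that nothing handed to a caller may alias internal state: the remote address is copied via `h.remote.Copy()` rather than returned directly. A minimal illustration of that defensive-copy pattern; the types and fields here are invented for the example and are not Nebula's:

```go
package main

import "fmt"

type hostInfo struct {
	remoteAddrs []string // internal state that callers must never alias
}

type hostInfoView struct {
	RemoteAddrs []string
}

// snapshot returns a copy of the internal slice so later mutations of the
// hostInfo cannot be observed (or caused) through the returned value.
func (h *hostInfo) snapshot() hostInfoView {
	out := make([]string, len(h.remoteAddrs))
	copy(out, h.remoteAddrs)
	return hostInfoView{RemoteAddrs: out}
}

func main() {
	h := &hostInfo{remoteAddrs: []string{"192.0.2.10:4242"}}
	v := h.snapshot()
	h.remoteAddrs[0] = "changed"
	fmt.Println(v.RemoteAddrs[0]) // still "192.0.2.10:4242"
}
```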
@@ -2,14 +2,15 @@ package nebula

import (
"net"
-"net/netip"
"reflect"
"testing"
"time"

"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
+"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
+"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
)

@@ -17,19 +18,16 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l := test.NewLogger()
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
// To properly ensure we are not exposing core memory to the caller
-hm := newHostMap(l, netip.Prefix{})
+hm := NewHostMap(l, &net.IPNet{}, make([]*net.IPNet, 0))
-hm.preferredRanges.Store(&[]netip.Prefix{})
+remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
+remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
-remote1 := netip.MustParseAddrPort("0.0.0.100:4444")
-remote2 := netip.MustParseAddrPort("[1:2:3:4:5:6:7:8]:4444")

ipNet := net.IPNet{
-IP: remote1.Addr().AsSlice(),
+IP: net.IPv4(1, 2, 3, 4),
Mask: net.IPMask{255, 255, 255, 0},
}

ipNet2 := net.IPNet{
-IP: remote2.Addr().AsSlice(),
+IP: net.ParseIP("1:2:3:4:5:6:7:8"),
Mask: net.IPMask{255, 255, 255, 0},
}

@@ -50,12 +48,8 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
}

remotes := NewRemoteList(nil)
-remotes.unlockedPrependV4(netip.IPv4Unspecified(), NewIp4AndPortFromNetIP(remote1.Addr(), remote1.Port()))
+remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
-remotes.unlockedPrependV6(netip.IPv4Unspecified(), NewIp6AndPortFromNetIP(remote2.Addr(), remote2.Port()))
+remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))

-vpnIp, ok := netip.AddrFromSlice(ipNet.IP)
-assert.True(t, ok)

hm.unlockedAddHostInfo(&HostInfo{
remote: remote1,
remotes: remotes,
@@ -64,17 +58,14 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
},
remoteIndexId: 200,
localIndexId: 201,
-vpnIp: vpnIp,
+vpnIp: iputil.Ip2VpnIp(ipNet.IP),
relayState: RelayState{
-relays: nil,
+relays: map[iputil.VpnIp]struct{}{},
-relayForByIp: map[netip.Addr]*Relay{},
+relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}, &Interface{})

-vpnIp2, ok := netip.AddrFromSlice(ipNet2.IP)
-assert.True(t, ok)

hm.unlockedAddHostInfo(&HostInfo{
remote: remote1,
remotes: remotes,
@@ -83,10 +74,10 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
},
remoteIndexId: 200,
localIndexId: 201,
-vpnIp: vpnIp2,
+vpnIp: iputil.Ip2VpnIp(ipNet2.IP),
relayState: RelayState{
-relays: nil,
+relays: map[iputil.VpnIp]struct{}{},
-relayForByIp: map[netip.Addr]*Relay{},
+relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}, &Interface{})
@@ -98,29 +89,27 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l: logrus.New(),
}

-thi := c.GetHostInfoByVpnIp(vpnIp, false)
+thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false)

expectedInfo := ControlHostInfo{
-VpnIp: vpnIp,
+VpnIp: net.IPv4(1, 2, 3, 4).To4(),
LocalIndex: 201,
RemoteIndex: 200,
-RemoteAddrs: []netip.AddrPort{remote2, remote1},
+RemoteAddrs: []*udp.Addr{remote2, remote1},
Cert: crt.Copy(),
MessageCounter: 0,
-CurrentRemote: remote1,
+CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
-CurrentRelaysToMe: []netip.Addr{},
+CurrentRelaysToMe: []iputil.VpnIp{},
-CurrentRelaysThroughMe: []netip.Addr{},
+CurrentRelaysThroughMe: []iputil.VpnIp{},
}

// Make sure we don't have any unexpected fields
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
-assert.EqualValues(t, &expectedInfo, thi)
+test.AssertDeepCopyEqual(t, &expectedInfo, thi)
-//TODO: netip.Addr reuses global memory for zone identifiers which breaks our "no reused memory check" here
-//test.AssertDeepCopyEqual(t, &expectedInfo, thi)

// Make sure we don't panic if the host info doesn't have a cert yet
assert.NotPanics(t, func() {
-thi = c.GetHostInfoByVpnIp(vpnIp2, false)
+thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false)
})
}

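Most of this branch's churn is the address-type migration exercised by the test above: `netip.Addr`/`netip.AddrPort` on one side versus the older `net.IP` plus `iputil.VpnIp` (a uint32-backed IPv4 value) on the other. A rough, self-contained sketch of what such a conversion looks like; the `vpnIp` type below is a stand-in for illustration, not the real `iputil.VpnIp`:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// vpnIp mimics a uint32-backed IPv4 address type similar in spirit to
// iputil.VpnIp. This is an illustrative stand-in, not Nebula's implementation.
type vpnIp uint32

func fromAddr(a netip.Addr) vpnIp {
	b := a.As4() // panics for IPv6; fine for this IPv4-only sketch
	return vpnIp(binary.BigEndian.Uint32(b[:]))
}

func (v vpnIp) toAddr() netip.Addr {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], uint32(v))
	return netip.AddrFrom4(b)
}

func main() {
	a := netip.MustParseAddr("172.1.1.2")
	v := fromAddr(a)
	fmt.Printf("%s -> %d -> %s\n", a, v, v.toAddr())
}
```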
@@ -4,13 +4,14 @@
package nebula

import (
-"net/netip"
+"net"

"github.com/slackhq/nebula/cert"

"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp"
)
@@ -49,30 +50,37 @@ func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType,

// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
// This is necessary if you did not configure static hosts or are not running a lighthouse
-func (c *Control) InjectLightHouseAddr(vpnIp netip.Addr, toAddr netip.AddrPort) {
+func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) {
c.f.lightHouse.Lock()
-remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
+remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
remoteList.Lock()
defer remoteList.Unlock()
c.f.lightHouse.Unlock()

-if toAddr.Addr().Is4() {
+iVpnIp := iputil.Ip2VpnIp(vpnIp)
-remoteList.unlockedPrependV4(vpnIp, NewIp4AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
+if v4 := toAddr.IP.To4(); v4 != nil {
+remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
} else {
-remoteList.unlockedPrependV6(vpnIp, NewIp6AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
+remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port)))
}
}

// InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp
// This is necessary to inform an initiator of possible relays for communicating with a responder
-func (c *Control) InjectRelays(vpnIp netip.Addr, relayVpnIps []netip.Addr) {
+func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) {
c.f.lightHouse.Lock()
-remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
+remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
remoteList.Lock()
defer remoteList.Unlock()
c.f.lightHouse.Unlock()

-remoteList.unlockedSetRelay(vpnIp, vpnIp, relayVpnIps)
+iVpnIp := iputil.Ip2VpnIp(vpnIp)
+uVpnIp := []uint32{}
+for _, rVPnIp := range relayVpnIps {
+uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp)))
+}

+remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp)
}

// GetFromTun will pull a packet off the tun side of nebula
@@ -99,14 +107,13 @@ func (c *Control) InjectUDPPacket(p *udp.Packet) {
}

// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
-func (c *Control) InjectTunUDPPacket(toIp netip.Addr, toPort uint16, fromPort uint16, data []byte) {
+func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) {
-//TODO: IPV6-WORK
ip := layers.IPv4{
Version: 4,
TTL: 64,
Protocol: layers.IPProtocolUDP,
-SrcIP: c.f.inside.Cidr().Addr().Unmap().AsSlice(),
+SrcIP: c.f.inside.Cidr().IP,
-DstIP: toIp.Unmap().AsSlice(),
+DstIP: toIp,
}

udp := layers.UDP{
@@ -131,16 +138,16 @@ func (c *Control) InjectTunUDPPacket(toIp netip.Addr, toPort uint16, fromPort ui
c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
}

-func (c *Control) GetVpnIp() netip.Addr {
+func (c *Control) GetVpnIp() iputil.VpnIp {
-return c.f.myVpnNet.Addr()
+return c.f.myVpnIp
}

-func (c *Control) GetUDPAddr() netip.AddrPort {
+func (c *Control) GetUDPAddr() string {
-return c.f.outside.(*udp.TesterConn).Addr
+return c.f.outside.(*udp.TesterConn).Addr.String()
}

-func (c *Control) KillPendingTunnel(vpnIp netip.Addr) bool {
+func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
-hostinfo := c.f.handshakeManager.QueryVpnIp(vpnIp)
+hostinfo := c.f.handshakeManager.QueryVpnIp(iputil.Ip2VpnIp(vpnIp))
if hostinfo == nil {
return false
}
@@ -157,6 +164,6 @@ func (c *Control) GetCert() *cert.NebulaCertificate {
return c.f.pki.GetCertState().Certificate
}

-func (c *Control) ReHandshake(vpnIp netip.Addr) {
+func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
c.f.handshakeManager.StartHandshake(vpnIp, nil)
}

15 dist/arch/nebula.service vendored Normal file
@@ -0,0 +1,15 @@
+[Unit]
+Description=Nebula overlay networking tool
+Wants=basic.target network-online.target nss-lookup.target time-sync.target
+After=basic.target network.target network-online.target
+
+[Service]
+Type=notify
+NotifyAccess=main
+SyslogIdentifier=nebula
+ExecReload=/bin/kill -HUP $MAINPID
+ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

16 dist/fedora/nebula.service vendored Normal file
@@ -0,0 +1,16 @@
+[Unit]
+Description=Nebula overlay networking tool
+Wants=basic.target network-online.target nss-lookup.target time-sync.target
+After=basic.target network.target network-online.target
+Before=sshd.service
+
+[Service]
+Type=notify
+NotifyAccess=main
+SyslogIdentifier=nebula
+ExecReload=/bin/kill -HUP $MAINPID
+ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
@@ -3,7 +3,6 @@ package nebula
import (
"fmt"
"net"
-"net/netip"
"strconv"
"strings"
"sync"
@@ -11,6 +10,7 @@ import (
"github.com/miekg/dns"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/config"
+"github.com/slackhq/nebula/iputil"
)

// This whole thing should be rewritten to use context
@@ -42,23 +42,21 @@ func (d *dnsRecords) Query(data string) string {
}

func (d *dnsRecords) QueryCert(data string) string {
-ip, err := netip.ParseAddr(data[:len(data)-1])
+ip := net.ParseIP(data[:len(data)-1])
-if err != nil {
+if ip == nil {
return ""
}
+iip := iputil.Ip2VpnIp(ip)
-hostinfo := d.hostMap.QueryVpnIp(ip)
+hostinfo := d.hostMap.QueryVpnIp(iip)
if hostinfo == nil {
return ""
}

q := hostinfo.GetCert()
if q == nil {
return ""
}

cert := q.Details
-c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAfter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
+c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAFter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
return c
}

@@ -82,11 +80,7 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
}
case dns.TypeTXT:
a, _, _ := net.SplitHostPort(w.RemoteAddr().String())
-b, err := netip.ParseAddr(a)
+b := net.ParseIP(a)
-if err != nil {
-return
-}

// We don't answer these queries from non nebula nodes or localhost
//l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR)
if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" {
@@ -102,10 +96,6 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
}
}
}

-if len(m.Answer) == 0 {
-m.Rcode = dns.RcodeNameError
-}
}

func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
@@ -139,12 +129,7 @@ func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
}

func getDnsServerAddr(c *config.C) string {
-dnsHost := strings.TrimSpace(c.GetString("lighthouse.dns.host", ""))
+return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
-// Old guidance was to provide the literal `[::]` in `lighthouse.dns.host` but that won't resolve.
-if dnsHost == "[::]" {
-dnsHost = "::"
-}
-return net.JoinHostPort(dnsHost, strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)))
}

func startDns(l *logrus.Logger, c *config.C) {
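The removed lines above are the more robust way to build the DNS listen address: plain string concatenation turns an IPv6 host such as `::` into `:::53`, while `net.JoinHostPort` adds the required brackets, and the trimming plus `[::]` special case keeps older configs working. A small standalone sketch of the difference; the config values are made up for illustration:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// joinDNSAddr mirrors the removed logic: trim whitespace, map the legacy
// literal "[::]" to "::", then let net.JoinHostPort add brackets as needed.
func joinDNSAddr(host, port string) string {
	host = strings.TrimSpace(host)
	if host == "[::]" {
		host = "::"
	}
	return net.JoinHostPort(host, port)
}

func main() {
	fmt.Println("::" + ":" + "53")          // ":::53" - not a dialable address
	fmt.Println(joinDNSAddr("[::] ", "53")) // "[::]:53"
	fmt.Println(joinDNSAddr("0.0.0.0", "53"))
}
```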
@@ -4,8 +4,6 @@ import (
"testing"

"github.com/miekg/dns"
-"github.com/slackhq/nebula/config"
-"github.com/stretchr/testify/assert"
)

func TestParsequery(t *testing.T) {
@@ -19,40 +17,3 @@ func TestParsequery(t *testing.T) {

//parseQuery(m)
}

-func Test_getDnsServerAddr(t *testing.T) {
-c := config.NewC(nil)

-c.Settings["lighthouse"] = map[interface{}]interface{}{
-"dns": map[interface{}]interface{}{
-"host": "0.0.0.0",
-"port": "1",
-},
-}
-assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))

-c.Settings["lighthouse"] = map[interface{}]interface{}{
-"dns": map[interface{}]interface{}{
-"host": "::",
-"port": "1",
-},
-}
-assert.Equal(t, "[::]:1", getDnsServerAddr(c))

-c.Settings["lighthouse"] = map[interface{}]interface{}{
-"dns": map[interface{}]interface{}{
-"host": "[::]",
-"port": "1",
-},
-}
-assert.Equal(t, "[::]:1", getDnsServerAddr(c))

-// Make sure whitespace doesn't mess us up
-c.Settings["lighthouse"] = map[interface{}]interface{}{
-"dns": map[interface{}]interface{}{
-"host": "[::] ",
-"port": "1",
-},
-}
-assert.Equal(t, "[::]:1", getDnsServerAddr(c))
-}
@@ -1,11 +0,0 @@
-FROM gcr.io/distroless/static:latest
-
-ARG TARGETOS TARGETARCH
-COPY build/$TARGETOS-$TARGETARCH/nebula /nebula
-COPY build/$TARGETOS-$TARGETARCH/nebula-cert /nebula-cert
-
-VOLUME ["/config"]
-
-ENTRYPOINT ["/nebula"]
-# Allow users to override the args passed to nebula
-CMD ["-config", "/config/config.yml"]
@@ -1,24 +0,0 @@
-# NebulaOSS/nebula Docker Image
-
-## Building
-
-From the root of the repository, run `make docker`.
-
-## Running
-
-To run the built image, use the following command:
-
-```
-docker run \
---name nebula \
---network host \
---cap-add NET_ADMIN \
---volume ./config:/config \
---rm \
-nebulaoss/nebula
-```
-
-A few notes:
-
-- The `NET_ADMIN` capability is necessary to create the tun adapter on the host (this is unnecessary if the tun device is disabled.)
-- `--volume ./config:/config` should point to a directory that contains your `config.yml` and any other necessary files.
@@ -4,29 +4,28 @@
 package e2e

 import (
-	"net/netip"
-	"slices"
+	"fmt"
+	"net"
 	"testing"
 	"time"

-	"github.com/google/gopacket"
-	"github.com/google/gopacket/layers"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula"
 	"github.com/slackhq/nebula/e2e/router"
 	"github.com/slackhq/nebula/header"
+	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/udp"
 	"github.com/stretchr/testify/assert"
 	"gopkg.in/yaml.v2"
 )

 func BenchmarkHotPath(b *testing.B) {
-	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
-	myControl, _, _, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
-	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
+	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+	myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
+	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

 	// Put their info in our lighthouse
-	myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+	myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

 	// Start the servers
 	myControl.Start()
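The rest of this file changes in the same way on nearly every line: one side of the diff carries CIDR strings parsed into netip types, the other passes raw net.IP values (with iputil.VpnIp used for hostmap keys). A small standalone sketch of the two styles, using only standard-library calls and made-up addresses (not the repo's own helpers):

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// netip-style: parse the CIDR once and pass netip.Addr values around.
	prefix, err := netip.ParsePrefix("10.128.0.1/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(prefix.Addr()) // 10.128.0.1

	// net.IP-style: build the address directly and keep the *net.IPNet alongside it.
	ip, ipNet, err := net.ParseCIDR("10.0.0.1/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip, ipNet) // 10.0.0.1 10.0.0.0/24
}
```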
@@ -36,7 +35,7 @@ func BenchmarkHotPath(b *testing.B) {
 	r.CancelFlowLogs()

 	for n := 0; n < b.N; n++ {
-		myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+		myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
 		_ = r.RouteForAllUntilTxTun(theirControl)
 	}

@@ -45,19 +44,19 @@ func BenchmarkHotPath(b *testing.B) {
 }

 func TestGoodHandshake(t *testing.T) {
-	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
-	myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
-	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
+	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+	myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
+	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

 	// Put their info in our lighthouse
-	myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+	myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

 	// Start the servers
 	myControl.Start()
 	theirControl.Start()

 	t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
-	myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+	myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))

 	t.Log("Have them consume my stage 0 packet. They have a tunnel now")
 	theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
@@ -78,16 +77,16 @@ func TestGoodHandshake(t *testing.T) {
 	myControl.WaitForType(1, 0, theirControl)

 	t.Log("Make sure our host infos are correct")
-	assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl)
+	assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)

 	t.Log("Get that cached packet and make sure it looks right")
 	myCachedPacket := theirControl.GetFromTun(true)
-	assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
+	assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)

 	t.Log("Do a bidirectional tunnel test")
 	r := router.NewR(t, myControl, theirControl)
 	defer r.RenderFlow()
-	assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+	assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)

 	r.RenderHostmaps("Final hostmaps", myControl, theirControl)
 	myControl.Stop()
@@ -96,20 +95,20 @@ func TestGoodHandshake(t *testing.T) {
 }

 func TestWrongResponderHandshake(t *testing.T) {
-	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})

 	// The IPs here are chosen on purpose:
 	// The current remote handling will sort by preference, public, and then lexically.
 	// So we need them to have a higher address than evil (we could apply a preference though)
-	myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.100/24", nil)
-	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.99/24", nil)
-	evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", "10.128.0.2/24", nil)
+	myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
+	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
+	evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)

 	// Add their real udp addr, which should be tried after evil.
-	myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+	myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

 	// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
-	myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), evilUdpAddr)
+	myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr)

 	// Build a router so we don't have to reason who gets which packet
 	r := router.NewR(t, myControl, theirControl, evilControl)
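For reference, the "higher address" in the comment above comes down to plain byte-wise comparison of the addresses once preference and public reachability are equal. A tiny sketch with netip (illustrative only; nebula's real remote sorting weighs preference and public/private status before falling back to this ordering):

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	them := netip.MustParseAddr("10.128.0.99")
	evil := netip.MustParseAddr("10.128.0.2")

	// Compare orders addresses byte-wise, so .99 sorts above .2 and
	// "them" ends up tried after "evil" under a purely lexical sort.
	fmt.Println(them.Compare(evil)) // 1
}
```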
@@ -121,7 +120,7 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
evilControl.Start()
|
evilControl.Start()
|
||||||
|
|
||||||
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||||
h := &header.H{}
|
h := &header.H{}
|
||||||
err := h.Parse(p.Data)
|
err := h.Parse(p.Data)
|
||||||
@@ -129,7 +128,7 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.To == theirUdpAddr && h.Type == 1 {
|
if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 {
|
||||||
return router.RouteAndExit
|
return router.RouteAndExit
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -140,18 +139,18 @@ func TestWrongResponderHandshake(t *testing.T) {
|
|||||||
|
|
||||||
t.Log("My cached packet should be received by them")
|
t.Log("My cached packet should be received by them")
|
||||||
myCachedPacket := theirControl.GetFromTun(true)
|
myCachedPacket := theirControl.GetFromTun(true)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
t.Log("Test the tunnel with them")
|
t.Log("Test the tunnel with them")
|
||||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl)
|
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
t.Log("Flush all packets from all controllers")
|
t.Log("Flush all packets from all controllers")
|
||||||
r.FlushAll()
|
r.FlushAll()
|
||||||
|
|
||||||
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
|
||||||
assert.Nil(t, myControl.GetHostInfoByVpnIp(evilVpnIp.Addr(), true), "My pending hostmap should not contain evil")
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil")
|
||||||
assert.Nil(t, myControl.GetHostInfoByVpnIp(evilVpnIp.Addr(), false), "My main hostmap should not contain evil")
|
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil")
|
||||||
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
|
||||||
|
|
||||||
//TODO: assert hostmaps for everyone
|
//TODO: assert hostmaps for everyone
|
||||||
@@ -165,13 +164,13 @@ func TestStage1Race(t *testing.T) {
|
|||||||
// This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow
|
// This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow
|
||||||
// But will eventually collapse down to a single tunnel
|
// But will eventually collapse down to a single tunnel
|
||||||
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Put their info in our lighthouse and vice versa
|
// Put their info in our lighthouse and vice versa
|
||||||
myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
@@ -182,8 +181,8 @@ func TestStage1Race(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake to start on both me and them")
|
t.Log("Trigger a handshake to start on both me and them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
t.Log("Get both stage 1 handshake packets")
|
t.Log("Get both stage 1 handshake packets")
|
||||||
myHsForThem := myControl.GetFromUDP(true)
|
myHsForThem := myControl.GetFromUDP(true)
|
||||||
@@ -195,14 +194,14 @@ func TestStage1Race(t *testing.T) {
|
|||||||
|
|
||||||
r.Log("Route until they receive a message packet")
|
r.Log("Route until they receive a message packet")
|
||||||
myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
|
myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
r.Log("Their cached packet should be received by me")
|
r.Log("Their cached packet should be received by me")
|
||||||
theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
|
theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
|
||||||
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
r.Log("Do a bidirectional tunnel test")
|
r.Log("Do a bidirectional tunnel test")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
myHostmapHosts := myControl.ListHostmapHosts(false)
|
myHostmapHosts := myControl.ListHostmapHosts(false)
|
||||||
myHostmapIndexes := myControl.ListHostmapIndexes(false)
|
myHostmapIndexes := myControl.ListHostmapIndexes(false)
|
||||||
@@ -220,7 +219,7 @@ func TestStage1Race(t *testing.T) {
|
|||||||
r.Log("Spin until connection manager tears down a tunnel")
|
r.Log("Spin until connection manager tears down a tunnel")
|
||||||
|
|
||||||
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
t.Log("Connection manager hasn't ticked yet")
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
}
|
}
|
||||||
@@ -242,13 +241,13 @@ func TestStage1Race(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUncleanShutdownRaceLoser(t *testing.T) {
|
func TestUncleanShutdownRaceLoser(t *testing.T) {
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
@@ -259,28 +258,28 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
r.Log("Trigger a handshake from me to them")
|
r.Log("Trigger a handshake from me to them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
r.Log("Nuke my hostmap")
|
r.Log("Nuke my hostmap")
|
||||||
myHostmap := myControl.GetHostmap()
|
myHostmap := myControl.GetHostmap()
|
||||||
myHostmap.Hosts = map[netip.Addr]*nebula.HostInfo{}
|
myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
|
||||||
myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
||||||
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me again"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again"))
|
||||||
p = r.RouteForAllUntilTxTun(theirControl)
|
p = r.RouteForAllUntilTxTun(theirControl)
|
||||||
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
|
|
||||||
r.Log("Assert the tunnel works")
|
r.Log("Assert the tunnel works")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
r.Log("Wait for the dead index to go away")
|
r.Log("Wait for the dead index to go away")
|
||||||
start := len(theirControl.GetHostmap().Indexes)
|
start := len(theirControl.GetHostmap().Indexes)
|
||||||
for {
|
for {
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
if len(theirControl.GetHostmap().Indexes) < start {
|
if len(theirControl.GetHostmap().Indexes) < start {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -291,13 +290,13 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUncleanShutdownRaceWinner(t *testing.T) {
|
func TestUncleanShutdownRaceWinner(t *testing.T) {
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", nil)
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
|
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
@@ -308,30 +307,30 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
r.Log("Trigger a handshake from me to them")
|
r.Log("Trigger a handshake from me to them")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
r.Log("Nuke my hostmap")
|
r.Log("Nuke my hostmap")
|
||||||
theirHostmap := theirControl.GetHostmap()
|
theirHostmap := theirControl.GetHostmap()
|
||||||
theirHostmap.Hosts = map[netip.Addr]*nebula.HostInfo{}
|
theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
|
||||||
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
|
||||||
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
|
||||||
|
|
||||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them again"))
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again"))
|
||||||
p = r.RouteForAllUntilTxTun(myControl)
|
p = r.RouteForAllUntilTxTun(myControl)
|
||||||
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
|
||||||
r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
|
r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
r.Log("Assert the tunnel works")
|
r.Log("Assert the tunnel works")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
|
|
||||||
r.Log("Wait for the dead index to go away")
|
r.Log("Wait for the dead index to go away")
|
||||||
start := len(myControl.GetHostmap().Indexes)
|
start := len(myControl.GetHostmap().Indexes)
|
||||||
for {
|
for {
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
if len(myControl.GetHostmap().Indexes) < start {
|
if len(myControl.GetHostmap().Indexes) < start {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -342,15 +341,15 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRelays(t *testing.T) {
|
func TestRelays(t *testing.T) {
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
@@ -362,162 +361,31 @@ func TestRelays(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake from me to them via the relay")
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
r.Log("Assert the tunnel works")
|
r.Log("Assert the tunnel works")
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||||
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReestablishRelays(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
|
||||||
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
|
||||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
|
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
|
||||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
|
||||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
|
||||||
defer r.RenderFlow()
|
|
||||||
|
|
||||||
// Start the servers
|
|
||||||
myControl.Start()
|
|
||||||
relayControl.Start()
|
|
||||||
theirControl.Start()
|
|
||||||
|
|
||||||
t.Log("Trigger a handshake from me to them via the relay")
|
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
|
||||||
r.Log("Assert the tunnel works")
|
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
|
||||||
|
|
||||||
t.Log("Ensure packet traversal from them to me via the relay")
|
|
||||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
|
|
||||||
|
|
||||||
p = r.RouteForAllUntilTxTun(myControl)
|
|
||||||
r.Log("Assert the tunnel works")
|
|
||||||
assertUdpPacket(t, []byte("Hi from them"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
|
|
||||||
|
|
||||||
// If we break the relay's connection to 'them', 'me' needs to detect and recover the connection
|
|
||||||
r.Log("Close the tunnel")
|
|
||||||
relayControl.CloseTunnel(theirVpnIpNet.Addr(), true)
|
|
||||||
|
|
||||||
start := len(myControl.GetHostmap().Indexes)
|
|
||||||
curIndexes := len(myControl.GetHostmap().Indexes)
|
|
||||||
for curIndexes >= start {
|
|
||||||
curIndexes = len(myControl.GetHostmap().Indexes)
|
|
||||||
r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
|
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me should fail"))
|
|
||||||
|
|
||||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
|
||||||
return router.RouteAndExit
|
|
||||||
})
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
}
|
|
||||||
r.Log("Dead index went away. Woot!")
|
|
||||||
r.RenderHostmaps("Me removed hostinfo", myControl, relayControl, theirControl)
|
|
||||||
// Next packet should re-establish a relayed connection and work just great.
|
|
||||||
|
|
||||||
t.Logf("Assert the tunnel...")
|
|
||||||
for {
|
|
||||||
t.Log("RouteForAllUntilTxTun")
|
|
||||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
|
||||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
|
||||||
|
|
||||||
p = r.RouteForAllUntilTxTun(theirControl)
|
|
||||||
r.Log("Assert the tunnel works")
|
|
||||||
packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
|
|
||||||
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
|
|
||||||
if slices.Compare(v4.SrcIP, myVpnIpNet.Addr().AsSlice()) != 0 {
|
|
||||||
t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if slices.Compare(v4.DstIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
|
|
||||||
t.Logf("DstIP is unexpected...this is not the packet I'm looking for. Keep looking")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
|
||||||
if udp == nil {
|
|
||||||
t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
data := packet.ApplicationLayer()
|
|
||||||
if data == nil {
|
|
||||||
t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if string(data.Payload()) != "Hi from me" {
|
|
||||||
t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
t.Log("I found my lost packet. I am so happy.")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
t.Log("Assert the tunnel works the other way, too")
|
|
||||||
for {
|
|
||||||
t.Log("RouteForAllUntilTxTun")
|
|
||||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
|
|
||||||
|
|
||||||
p = r.RouteForAllUntilTxTun(myControl)
|
|
||||||
r.Log("Assert the tunnel works")
|
|
||||||
packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
|
|
||||||
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
|
|
||||||
if slices.Compare(v4.DstIP, myVpnIpNet.Addr().AsSlice()) != 0 {
|
|
||||||
t.Logf("Dst is unexpected...this is not the packet I'm looking for. Keep looking")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if slices.Compare(v4.SrcIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
|
|
||||||
t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
|
||||||
if udp == nil {
|
|
||||||
t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
data := packet.ApplicationLayer()
|
|
||||||
if data == nil {
|
|
||||||
t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if string(data.Payload()) != "Hi from them" {
|
|
||||||
t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
t.Log("I found my lost packet. I am so happy.")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStage1RaceRelays(t *testing.T) {
|
func TestStage1RaceRelays(t *testing.T) {
|
||||||
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
|
||||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
theirControl.InjectRelays(myVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
relayControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
|
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
@@ -529,14 +397,14 @@ func TestStage1RaceRelays(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
r.Log("Get a tunnel between me and relay")
|
r.Log("Get a tunnel between me and relay")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
|
||||||
r.Log("Get a tunnel between them and relay")
|
r.Log("Get a tunnel between them and relay")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
|
||||||
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
r.Log("Wait for a packet from them to me")
|
r.Log("Wait for a packet from them to me")
|
||||||
p := r.RouteForAllUntilTxTun(myControl)
|
p := r.RouteForAllUntilTxTun(myControl)
|
||||||
@@ -553,21 +421,21 @@ func TestStage1RaceRelays(t *testing.T) {
|
|||||||
|
|
||||||
func TestStage1RaceRelays2(t *testing.T) {
|
func TestStage1RaceRelays2(t *testing.T) {
|
||||||
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
|
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
l := NewTestLogger()
|
l := NewTestLogger()
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
|
|
||||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
theirControl.InjectRelays(myVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
|
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
relayControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
|
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
@@ -580,16 +448,16 @@ func TestStage1RaceRelays2(t *testing.T) {
|
|||||||
|
|
||||||
r.Log("Get a tunnel between me and relay")
|
r.Log("Get a tunnel between me and relay")
|
||||||
l.Info("Get a tunnel between me and relay")
|
l.Info("Get a tunnel between me and relay")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
|
|
||||||
r.Log("Get a tunnel between them and relay")
|
r.Log("Get a tunnel between them and relay")
|
||||||
l.Info("Get a tunnel between them and relay")
|
l.Info("Get a tunnel between them and relay")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
|
|
||||||
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
r.Log("Trigger a handshake from both them and me via relay to them and me")
|
||||||
l.Info("Trigger a handshake from both them and me via relay to them and me")
|
l.Info("Trigger a handshake from both them and me via relay to them and me")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
|
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
|
||||||
|
|
||||||
//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
|
//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
|
||||||
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
|
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
|
||||||
@@ -602,7 +470,7 @@ func TestStage1RaceRelays2(t *testing.T) {
|
|||||||
|
|
||||||
r.Log("Assert the tunnel works")
|
r.Log("Assert the tunnel works")
|
||||||
l.Info("Assert the tunnel works")
|
l.Info("Assert the tunnel works")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
|
||||||
t.Log("Wait until we remove extra tunnels")
|
t.Log("Wait until we remove extra tunnels")
|
||||||
l.Info("Wait until we remove extra tunnels")
|
l.Info("Wait until we remove extra tunnels")
|
||||||
@@ -622,7 +490,7 @@ func TestStage1RaceRelays2(t *testing.T) {
|
|||||||
"theirControl": len(theirControl.GetHostmap().Indexes),
|
"theirControl": len(theirControl.GetHostmap().Indexes),
|
||||||
"relayControl": len(relayControl.GetHostmap().Indexes),
|
"relayControl": len(relayControl.GetHostmap().Indexes),
|
||||||
}).Info("Waiting for hostinfos to be removed...")
|
}).Info("Waiting for hostinfos to be removed...")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
|
||||||
t.Log("Connection manager hasn't ticked yet")
|
t.Log("Connection manager hasn't ticked yet")
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
retries--
|
retries--
|
||||||
@@ -630,7 +498,7 @@ func TestStage1RaceRelays2(t *testing.T) {
|
|||||||
|
|
||||||
r.Log("Assert the tunnel works")
|
r.Log("Assert the tunnel works")
|
||||||
l.Info("Assert the tunnel works")
|
l.Info("Assert the tunnel works")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
|
|
||||||
myControl.Stop()
|
myControl.Stop()
|
||||||
theirControl.Stop()
|
theirControl.Stop()
|
||||||
@@ -639,17 +507,16 @@ func TestStage1RaceRelays2(t *testing.T) {
|
|||||||
//
|
//
|
||||||
////TODO: assert hostmaps
|
////TODO: assert hostmaps
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRehandshakingRelays(t *testing.T) {
|
func TestRehandshakingRelays(t *testing.T) {
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||||
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
|
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||||
|
|
||||||
// Teach my how to get to the relay and that their can be reached via the relay
|
// Teach my how to get to the relay and that their can be reached via the relay
|
||||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||||
|
|
||||||
// Build a router so we don't have to reason who gets which packet
|
// Build a router so we don't have to reason who gets which packet
|
||||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||||
@@ -661,11 +528,11 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
theirControl.Start()
|
theirControl.Start()
|
||||||
|
|
||||||
t.Log("Trigger a handshake from me to them via the relay")
|
t.Log("Trigger a handshake from me to them via the relay")
|
||||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
|
||||||
|
|
||||||
p := r.RouteForAllUntilTxTun(theirControl)
|
p := r.RouteForAllUntilTxTun(theirControl)
|
||||||
r.Log("Assert the tunnel works")
|
r.Log("Assert the tunnel works")
|
||||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||||
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
|
|
||||||
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
|
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
|
||||||
@@ -689,8 +556,8 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
|
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
|
||||||
assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
|
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
|
||||||
c := myControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
|
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
if len(c.Cert.Details.Groups) != 0 {
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
// We have a new certificate now
|
// We have a new certificate now
|
||||||
r.Log("Certificate between my and relay is updated!")
|
r.Log("Certificate between my and relay is updated!")
|
||||||
@@ -702,8 +569,8 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
|
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
|
||||||
c := theirControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
|
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
|
||||||
if len(c.Cert.Details.Groups) != 0 {
|
if len(c.Cert.Details.Groups) != 0 {
|
||||||
// We have a new certificate now
|
// We have a new certificate now
|
||||||
r.Log("Certificate between their and relay is updated!")
|
r.Log("Certificate between their and relay is updated!")
|
||||||
@@ -714,13 +581,13 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r.Log("Assert the relay tunnel still works")
|
r.Log("Assert the relay tunnel still works")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
|
||||||
// We should have two hostinfos on all sides
|
// We should have two hostinfos on all sides
|
||||||
for len(myControl.GetHostmap().Indexes) != 2 {
|
for len(myControl.GetHostmap().Indexes) != 2 {
|
||||||
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
|
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
|
||||||
r.Log("Assert the relay tunnel still works")
|
r.Log("Assert the relay tunnel still works")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
r.Log("yupitdoes")
|
r.Log("yupitdoes")
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
}
|
}
|
||||||
@@ -728,7 +595,7 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
for len(theirControl.GetHostmap().Indexes) != 2 {
|
for len(theirControl.GetHostmap().Indexes) != 2 {
|
||||||
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
|
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
|
||||||
r.Log("Assert the relay tunnel still works")
|
r.Log("Assert the relay tunnel still works")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
r.Log("yupitdoes")
|
r.Log("yupitdoes")
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
}
|
}
|
||||||
@@ -736,7 +603,7 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
for len(relayControl.GetHostmap().Indexes) != 2 {
|
for len(relayControl.GetHostmap().Indexes) != 2 {
|
||||||
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
|
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
|
||||||
r.Log("Assert the relay tunnel still works")
|
r.Log("Assert the relay tunnel still works")
|
||||||
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
|
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
|
||||||
r.Log("yupitdoes")
|
r.Log("yupitdoes")
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
}
|
}
|
||||||
@@ -745,15 +612,15 @@ func TestRehandshakingRelays(t *testing.T) {
|
|||||||
|
|
||||||
func TestRehandshakingRelaysPrimary(t *testing.T) {
|
func TestRehandshakingRelaysPrimary(t *testing.T) {
|
||||||
// This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
|
// This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
|
||||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||||
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.128/24", m{"relay": m{"use_relays": true}})
|
+myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 128}, m{"relay": m{"use_relays": true}})
-relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", "10.128.0.1/24", m{"relay": m{"am_relay": true}})
+relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 1}, m{"relay": m{"am_relay": true}})
-theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
+theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})

// Teach my how to get to the relay and that their can be reached via the relay
-myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
-myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
-relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
@@ -765,11 +632,11 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {
theirControl.Start()

t.Log("Trigger a handshake from me to them via the relay")
-myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))

p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
-assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
+assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)

// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
@@ -793,8 +660,8 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {

for {
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
-assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
+assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
-c := myControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
+c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between my and relay is updated!")
@@ -806,8 +673,8 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {

for {
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
-assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
+assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
-c := theirControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
+c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between their and relay is updated!")
@@ -818,13 +685,13 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {
}

r.Log("Assert the relay tunnel still works")
-assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// We should have two hostinfos on all sides
for len(myControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
-assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
@@ -832,7 +699,7 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {
for len(theirControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
-assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
@@ -840,7 +707,7 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {
for len(relayControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
-assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
@@ -848,13 +715,13 @@ func TestRehandshakingRelaysPrimary(t *testing.T) {
}

func TestRehandshaking(t *testing.T) {
-ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
-myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", "10.128.0.2/24", nil)
+myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
-theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", "10.128.0.1/24", nil)
+theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)

// Put their info in our lighthouse and vice versa
-myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
-theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
+theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)

// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
@@ -865,7 +732,7 @@ func TestRehandshaking(t *testing.T) {
theirControl.Start()

t.Log("Stand up a tunnel between me and them")
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)

r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

@@ -887,8 +754,8 @@ func TestRehandshaking(t *testing.T) {
myConfig.ReloadConfigString(string(rc))

for {
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
-c := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
+c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
break
@@ -914,19 +781,19 @@ func TestRehandshaking(t *testing.T) {

r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}

-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)

// Make sure the correct tunnel won
-c := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
+c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
assert.Contains(t, c.Cert.Details.Groups, "new group")

// We should only have a single tunnel now on both sides
@@ -944,13 +811,13 @@ func TestRehandshaking(t *testing.T) {
func TestRehandshakingLoser(t *testing.T) {
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
// Should be the one with the new certificate
-ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
-myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", "10.128.0.2/24", nil)
+myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
-theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", "10.128.0.1/24", nil)
+theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)

// Put their info in our lighthouse and vice versa
-myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
-theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
+theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)

// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
@@ -961,10 +828,11 @@ func TestRehandshakingLoser(t *testing.T) {
theirControl.Start()

t.Log("Stand up a tunnel between me and them")
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)

-myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
+tt1 := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
-theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
+tt2 := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
+fmt.Println(tt1.LocalIndex, tt2.LocalIndex)

r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

@@ -986,8 +854,8 @@ func TestRehandshakingLoser(t *testing.T) {
theirConfig.ReloadConfigString(string(rc))

for {
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
-theirCertInMe := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
+theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)

_, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"]
if theirNewGroup {
@@ -1014,19 +882,19 @@ func TestRehandshakingLoser(t *testing.T) {

r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}

-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)

// Make sure the correct tunnel won
-theirCertInMe := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
+theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group")

// We should only have a single tunnel now on both sides
@@ -1044,13 +912,13 @@ func TestRaceRegression(t *testing.T) {
// This test forces stage 1, stage 2, stage 1 to be received by me from them
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
// caused a cross-linked hostinfo
-ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
-myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
+myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
-theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
+theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

// Put their info in our lighthouse
-myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
-theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
+theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)

// Start the servers
myControl.Start()
@@ -1064,8 +932,8 @@ func TestRaceRegression(t *testing.T) {
//them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089

t.Log("Start both handshakes")
-myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
-theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
+theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))

t.Log("Get both stage 1")
myStage1ForThem := myControl.GetFromUDP(true)
@@ -1095,7 +963,7 @@ func TestRaceRegression(t *testing.T) {
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

t.Log("Make sure the tunnel still works")
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)

myControl.Stop()
theirControl.Stop()
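The hunks above swap every `netip.Addr` argument for a `net.IP` one. For readers tracing that change, here is a minimal standalone sketch of converting between the two representations, using only the Go standard library (not code from this diff):

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// netip.Addr -> net.IP: AsSlice returns the raw address bytes.
	addr := netip.MustParseAddr("10.128.0.1")
	ip := net.IP(addr.AsSlice())

	// net.IP -> netip.Addr: AddrFromSlice reports whether the slice had a valid length.
	back, ok := netip.AddrFromSlice(ip)
	fmt.Println(ip, back, ok)
}
```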
@@ -4,7 +4,6 @@ import (
"crypto/rand"
"io"
"net"
-"net/netip"
"time"

"github.com/slackhq/nebula/cert"
@@ -13,7 +12,7 @@ import (
)

// NewTestCaCert will generate a CA cert
-func NewTestCaCert(before, after time.Time, ips, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+func NewTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
@@ -34,17 +33,11 @@ func NewTestCaCert(before, after time.Time, ips, subnets []netip.Prefix, groups
}

if len(ips) > 0 {
-nc.Details.Ips = make([]*net.IPNet, len(ips))
+nc.Details.Ips = ips
-for i, ip := range ips {
-nc.Details.Ips[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
-}
}

if len(subnets) > 0 {
-nc.Details.Subnets = make([]*net.IPNet, len(subnets))
+nc.Details.Subnets = subnets
-for i, ip := range subnets {
-nc.Details.Ips[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
-}
}

if len(groups) > 0 {
@@ -66,7 +59,7 @@ func NewTestCaCert(before, after time.Time, ips, subnets []netip.Prefix, groups

// NewTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
-func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip netip.Prefix, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
@@ -81,12 +74,12 @@ func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, af
}

pub, rawPriv := x25519Keypair()
-ipb := ip.Addr().AsSlice()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
-Ips: []*net.IPNet{{IP: ipb[:], Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}},
+Ips: []*net.IPNet{ip},
-//Subnets: subnets,
+Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
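NewTestCaCert and NewTestCert move from `netip.Prefix` parameters to `*net.IPNet`. A hedged sketch of the conversion the removed lines performed, standard library only:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

// prefixToIPNet mirrors the removed lines: the prefix address becomes the IP
// and the prefix length becomes the mask.
func prefixToIPNet(p netip.Prefix) *net.IPNet {
	return &net.IPNet{
		IP:   p.Addr().AsSlice(),
		Mask: net.CIDRMask(p.Bits(), p.Addr().BitLen()),
	}
}

func main() {
	p := netip.MustParsePrefix("10.128.0.2/24")
	fmt.Println(prefixToIPNet(p)) // 10.128.0.2/24
}
```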
@@ -6,7 +6,7 @@ package e2e
import (
"fmt"
"io"
-"net/netip"
+"net"
"os"
"testing"
"time"
@@ -19,6 +19,7 @@ import (
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e/router"
+"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
)
@@ -26,23 +27,15 @@ import (
type m map[string]interface{}

// newSimpleServer creates a nebula instance with many assumptions
-func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, sVpnIpNet string, overrides m) (*nebula.Control, netip.Prefix, netip.AddrPort, *config.C) {
+func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
l := NewTestLogger()

-vpnIpNet, err := netip.ParsePrefix(sVpnIpNet)
+vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
-if err != nil {
+copy(vpnIpNet.IP, udpIp)
-panic(err)
+vpnIpNet.IP[1] += 128
-}
+udpAddr := net.UDPAddr{
+IP: udpIp,
-var udpAddr netip.AddrPort
+Port: 4242,
-if vpnIpNet.Addr().Is4() {
-budpIp := vpnIpNet.Addr().As4()
-budpIp[1] -= 128
-udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
-} else {
-budpIp := vpnIpNet.Addr().As16()
-budpIp[13] -= 128
-udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
-}
}
_, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})

@@ -74,8 +67,8 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, s
// "try_interval": "1s",
//},
"listen": m{
-"host": udpAddr.Addr().String(),
+"host": udpAddr.IP.String(),
-"port": udpAddr.Port(),
+"port": udpAddr.Port,
},
"logging": m{
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
@@ -109,7 +102,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, s
panic(err)
}

-return control, vpnIpNet, udpAddr, c
+return control, vpnIpNet, &udpAddr, c
}

type doneCb func()
@@ -130,7 +123,7 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
}
}

-func assertTunnel(t *testing.T, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control, r *router.R) {
+func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
// Send a packet from them to me
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
bPacket := r.RouteForAllUntilTxTun(controlA)
@@ -142,20 +135,23 @@ func assertTunnel(t *testing.T, vpnIpA, vpnIpB netip.Addr, controlA, controlB *n
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
}

-func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control) {
+func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
// Get both host infos
-hBinA := controlA.GetHostInfoByVpnIp(vpnIpB, false)
+hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")

-hAinB := controlB.GetHostInfoByVpnIp(vpnIpA, false)
+hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")

// Check that both vpn and real addr are correct
assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")

-assert.Equal(t, addrB, hBinA.CurrentRemote, "Host B remote is wrong in control A")
+assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
-assert.Equal(t, addrA, hAinB.CurrentRemote, "Host A remote is wrong in control B")
+assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")

+assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
+assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")

// Check that our indexes match
assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
@@ -178,13 +174,13 @@ func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnIpA, vpnIp
//checkIndexes("hmB", hmB, hAinB)
}

-func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
+func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
assert.NotNil(t, v4, "No ipv4 data found")

-assert.Equal(t, fromIp.AsSlice(), []byte(v4.SrcIP), "Source ip was incorrect")
+assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
-assert.Equal(t, toIp.AsSlice(), []byte(v4.DstIP), "Dest ip was incorrect")
+assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")

udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
assert.NotNil(t, udp, "No udp data found")
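newSimpleServer and assertHostInfoPair now pass `*net.UDPAddr` where the other side of the diff used `netip.AddrPort`. A small sketch, assuming nothing beyond the standard library, of moving between those two types:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	ap := netip.AddrPortFrom(netip.MustParseAddr("10.0.0.1"), 4242)

	// netip.AddrPort -> *net.UDPAddr
	ua := net.UDPAddrFromAddrPort(ap)

	// *net.UDPAddr -> netip.AddrPort
	back := ua.AddrPort()

	fmt.Println(ua, back) // 10.0.0.1:4242 10.0.0.1:4242
}
```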
@@ -5,11 +5,11 @@ package router

import (
"fmt"
-"net/netip"
"sort"
"strings"

"github.com/slackhq/nebula"
+"github.com/slackhq/nebula/iputil"
)

type edge struct {
@@ -118,14 +118,14 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
return r, globalLines
}

-func sortedHosts(hosts map[netip.Addr]*nebula.HostInfo) []netip.Addr {
+func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
-keys := make([]netip.Addr, 0, len(hosts))
+keys := make([]iputil.VpnIp, 0, len(hosts))
for key := range hosts {
keys = append(keys, key)
}

sort.SliceStable(keys, func(i, j int) bool {
-return keys[i].Compare(keys[j]) > 0
+return keys[i] > keys[j]
})

return keys
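sortedHosts changes its key type, so the comparison flips from `netip.Addr.Compare` to a plain integer `>`. A brief sketch of the `Compare` pattern used on the removed side, standard library only:

```go
package main

import (
	"fmt"
	"net/netip"
	"sort"
)

func main() {
	keys := []netip.Addr{
		netip.MustParseAddr("10.0.0.2"),
		netip.MustParseAddr("10.0.0.1"),
		netip.MustParseAddr("10.0.0.3"),
	}
	// Compare returns -1, 0, or 1; "> 0" yields the same descending order as the removed code.
	sort.SliceStable(keys, func(i, j int) bool {
		return keys[i].Compare(keys[j]) > 0
	})
	fmt.Println(keys)
}
```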
@@ -6,11 +6,12 @@ package router
import (
"context"
"fmt"
-"net/netip"
+"net"
"os"
"path/filepath"
"reflect"
"sort"
+"strconv"
"strings"
"sync"
"testing"
@@ -20,6 +21,7 @@ import (
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
"golang.org/x/exp/maps"
)
@@ -27,18 +29,18 @@ import (
type R struct {
// Simple map of the ip:port registered on a control to the control
// Basically a router, right?
-controls map[netip.AddrPort]*nebula.Control
+controls map[string]*nebula.Control

// A map for inbound packets for a control that doesn't know about this address
-inNat map[netip.AddrPort]*nebula.Control
+inNat map[string]*nebula.Control

// A last used map, if an inbound packet hit the inNat map then
// all return packets should use the same last used inbound address for the outbound sender
// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver
-outNat map[string]netip.AddrPort
+outNat map[string]net.UDPAddr

// A map of vpn ip to the nebula control it belongs to
-vpnControls map[netip.Addr]*nebula.Control
+vpnControls map[iputil.VpnIp]*nebula.Control

ignoreFlows []ignoreFlow
flow []flowEntry
@@ -116,10 +118,10 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
}

r := &R{
-controls: make(map[netip.AddrPort]*nebula.Control),
+controls: make(map[string]*nebula.Control),
-vpnControls: make(map[netip.Addr]*nebula.Control),
+vpnControls: make(map[iputil.VpnIp]*nebula.Control),
-inNat: make(map[netip.AddrPort]*nebula.Control),
+inNat: make(map[string]*nebula.Control),
-outNat: make(map[string]netip.AddrPort),
+outNat: make(map[string]net.UDPAddr),
flow: []flowEntry{},
ignoreFlows: []ignoreFlow{},
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
@@ -133,7 +135,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
for _, c := range controls {
addr := c.GetUDPAddr()
if _, ok := r.controls[addr]; ok {
-panic("Duplicate listen address: " + addr.String())
+panic("Duplicate listen address: " + addr)
}

r.vpnControls[c.GetVpnIp()] = c
@@ -163,13 +165,13 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
// It does not look at the addr attached to the instance.
// If a route is used, this will behave like a NAT for the return path.
// Rewriting the source ip:port to what was last sent to from the origin
-func (r *R) AddRoute(ip netip.Addr, port uint16, c *nebula.Control) {
+func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) {
r.Lock()
defer r.Unlock()

-inAddr := netip.AddrPortFrom(ip, port)
+inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port))
if _, ok := r.inNat[inAddr]; ok {
-panic("Duplicate listen address inNat: " + inAddr.String())
+panic("Duplicate listen address inNat: " + inAddr)
}
r.inNat[inAddr] = c
}
@@ -196,7 +198,7 @@ func (r *R) renderFlow() {
panic(err)
}

-var participants = map[netip.AddrPort]struct{}{}
+var participants = map[string]struct{}{}
var participantsVals []string

fmt.Fprintln(f, "```mermaid")
@@ -213,7 +215,7 @@ func (r *R) renderFlow() {
continue
}
participants[addr] = struct{}{}
-sanAddr := strings.Replace(addr.String(), ":", "-", 1)
+sanAddr := strings.Replace(addr, ":", "-", 1)
participantsVals = append(participantsVals, sanAddr)
fmt.Fprintf(
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
@@ -250,9 +252,9 @@ func (r *R) renderFlow() {

fmt.Fprintf(f,
" %s%s%s: %s(%s), index %v, counter: %v\n",
-strings.Replace(p.from.GetUDPAddr().String(), ":", "-", 1),
+strings.Replace(p.from.GetUDPAddr(), ":", "-", 1),
line,
-strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
+strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
)
}
@@ -303,7 +305,7 @@ func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
func (r *R) renderHostmaps(title string) {
c := maps.Values(r.controls)
sort.SliceStable(c, func(i, j int) bool {
-return c[i].GetVpnIp().Compare(c[j].GetVpnIp()) > 0
+return c[i].GetVpnIp() > c[j].GetVpnIp()
})

s := renderHostmaps(c...)
@@ -418,8 +420,10 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []

// Nope, lets push the sender along
case p := <-udpTx:
+outAddr := sender.GetUDPAddr()
r.Lock()
-c := r.getControl(sender.GetUDPAddr(), p.To, p)
+inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
+c := r.getControl(outAddr, inAddr, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
@@ -475,7 +479,10 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
} else {
// we are a udp tx, route and continue
p := rx.Interface().(*udp.Packet)
-c := r.getControl(cm[x].GetUDPAddr(), p.To, p)
+outAddr := cm[x].GetUDPAddr()
+
+inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
+c := r.getControl(outAddr, inAddr, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
@@ -502,10 +509,12 @@ func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
panic(err)
}

-receiver := r.getControl(sender.GetUDPAddr(), p.To, p)
+outAddr := sender.GetUDPAddr()
+inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
+receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
-panic("Can't RouteExitFunc for host: " + p.To.String())
+panic("Can't route for host: " + inAddr)
}

e := whatDo(p, receiver)
@@ -581,13 +590,13 @@ func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
// finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit`
// If the router doesn't have the nebula controller for that address, we panic
-func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr netip.AddrPort, finish ExitType) {
+func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) {
if finish == KeepRouting {
finish = RouteAndExit
}

r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
-if p.To == toAddr {
+if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) {
return finish
}

@@ -621,10 +630,13 @@ func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
r.Lock()

p := rx.Interface().(*udp.Packet)
-receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
+outAddr := cm[x].GetUDPAddr()
+inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
+receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
-panic("Can't RouteForAllExitFunc for host: " + p.To.String())
+panic("Can't route for host: " + inAddr)
}

e := whatDo(p, receiver)
@@ -685,26 +697,41 @@ func (r *R) FlushAll() {

p := rx.Interface().(*udp.Packet)

-receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
+outAddr := cm[x].GetUDPAddr()
+inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
+receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
-panic("Can't FlushAll for host: " + p.To.String())
+panic("Can't route for host: " + inAddr)
}
-receiver.InjectUDPPacket(p)
r.Unlock()
}
}

// getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change
// This is an internal router function, the caller must hold the lock
-func (r *R) getControl(fromAddr, toAddr netip.AddrPort, p *udp.Packet) *nebula.Control {
+func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control {
-if newAddr, ok := r.outNat[fromAddr.String()+":"+toAddr.String()]; ok {
+if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok {
-p.From = newAddr
+p.FromIp = newAddr.IP
+p.FromPort = uint16(newAddr.Port)
}

c, ok := r.inNat[toAddr]
if ok {
-r.outNat[c.GetUDPAddr().String()+":"+fromAddr.String()] = toAddr
+sHost, sPort, err := net.SplitHostPort(toAddr)
+if err != nil {
+panic(err)
+}
+
+port, err := strconv.Atoi(sPort)
+if err != nil {
+panic(err)
+}
+
+r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
+IP: net.ParseIP(sHost),
+Port: port,
+}
return c
}

@@ -719,9 +746,8 @@ func (r *R) formatUdpPacket(p *packet) string {
}

from := "unknown"
-srcAddr, _ := netip.AddrFromSlice(v4.SrcIP)
+if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok {
-if c, ok := r.vpnControls[srcAddr]; ok {
+from = c.GetUDPAddr()
-from = c.GetUDPAddr().String()
}

udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
@@ -733,7 +759,7 @@ func (r *R) formatUdpPacket(p *packet) string {
return fmt.Sprintf(
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
strings.Replace(from, ":", "-", 1),
-strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
+strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
udp.SrcPort,
udp.DstPort,
string(data.Payload()),
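The router now keys its NAT maps by plain `host:port` strings rather than `netip.AddrPort` values. A hedged sketch of building and splitting such keys with the standard library, which is the pattern the added lines rely on:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// Build a map key the way the router's inNat/outNat maps do.
	key := net.JoinHostPort("10.0.0.1", strconv.Itoa(4242))

	// Split it back apart when the address needs to be rebuilt.
	host, portStr, err := net.SplitHostPort(key)
	if err != nil {
		panic(err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		panic(err)
	}
	fmt.Println(key, host, port) // 10.0.0.1:4242 10.0.0.1 4242
}
```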
@@ -1,55 +0,0 @@
-//go:build e2e_testing
-// +build e2e_testing
-
-package e2e
-
-import (
-"testing"
-"time"
-
-"github.com/slackhq/nebula/e2e/router"
-)
-
-func TestDropInactiveTunnels(t *testing.T) {
-// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
-// under ideal conditions
-ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
-myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
-theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
-
-// Share our underlay information
-myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
-theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
-
-// Start the servers
-myControl.Start()
-theirControl.Start()
-
-r := router.NewR(t, myControl, theirControl)
-
-r.Log("Assert the tunnel between me and them works")
-assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
-
-r.Log("Go inactive and wait for the tunnels to get dropped")
-waitStart := time.Now()
-for {
-myIndexes := len(myControl.GetHostmap().Indexes)
-theirIndexes := len(theirControl.GetHostmap().Indexes)
-if myIndexes == 0 && theirIndexes == 0 {
-break
-}
-
-since := time.Since(waitStart)
-r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
-if since > time.Second*30 {
-t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
-}
-
-time.Sleep(1 * time.Second)
-r.FlushAll()
-}
-
-r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
-myControl.Stop()
-theirControl.Stop()
-}
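The deleted test above waits in a loop for both hostmaps to empty and fails after 30 seconds. A generic sketch of that poll-until-timeout pattern, standard library only; the condition function here is a placeholder, not an API from this repository:

```go
package main

import (
	"fmt"
	"time"
)

// waitFor polls cond once per second until it returns true or the timeout elapses.
func waitFor(cond func() bool, timeout time.Duration) bool {
	start := time.Now()
	for !cond() {
		if time.Since(start) > timeout {
			return false
		}
		time.Sleep(time.Second)
	}
	return true
}

func main() {
	deadline := time.Now().Add(3 * time.Second)
	ok := waitFor(func() bool { return time.Now().After(deadline) }, 30*time.Second)
	fmt.Println("condition met:", ok)
}
```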
@@ -167,7 +167,8 @@ punchy:

# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
# path to a network adjacent nebula node.
-# This setting is reloadable.
+# NOTE: the previous option "local_range" only allowed definition of a single range
+# and has been deprecated for "preferred_ranges"
#preferred_ranges: ["172.16.0.0/24"]

# sshd can expose informational and administrative functions via ssh. This can expose informational and administrative
@@ -180,15 +181,12 @@ punchy:
# A file containing the ssh host private key to use
# A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
#host_key: ./ssh_host_ed25519_key
-# Authorized users and their public keys
+# A file containing a list of authorized public keys
#authorized_users:
#- user: steeeeve
# keys can be an array of strings or single string
#keys:
#- "ssh public key string"
-# Trusted SSH CA public keys. These are the public keys of the CAs that are allowed to sign SSH keys for access.
-#trusted_cas:
-#- "ssh public key string"

# EXPERIMENTAL: relay support for networks that can't establish direct connections.
relay:
@@ -232,7 +230,6 @@ tun:
# `mtu`: will default to tun mtu if this option is not specified
# `metric`: will default to 0 if this option is not specified
# `install`: will default to true, controls whether this route is installed in the systems routing table.
-# This setting is reloadable.
unsafe_routes:
#- route: 172.16.1.0/24
# via: 192.168.100.99
@@ -247,10 +244,7 @@ tun:
# TODO
# Configure logging level
logging:
-# panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
+# panic, fatal, error, warning, info, or debug. Default is info
-#NOTE: Debug mode can log remotely controlled/untrusted data which can quickly fill a disk in some
-# scenarios. Debug logging is also CPU intensive and will decrease performance overall.
-# Only enable debug logging while actively investigating an issue.
level: info
# json or text formats currently available. Default is text
format: text
@@ -278,6 +272,10 @@ logging:
#namespace: prometheusns
#subsystem: nebula
#interval: 10s
+# You can optionally set static labels to include with all metrics
+#labels:
+# myStaticLabel1: value1
+# myStaticLabel2: value2

# enables counter metrics for meta packets
# e.g.: `messages.tx.handshake`
@@ -303,18 +301,6 @@ logging:
# after receiving the response for lighthouse queries
#trigger_buffer: 64

-# Tunnel manager settings
-#tunnels:
-# drop_inactive controls whether inactive tunnels are maintained or dropped after the inactive_timeout period has
-# elapsed.
-# In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
-# This setting is reloadable
-#drop_inactive: false

-# inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
-# inactive and eligible to be dropped.
-# This setting is reloadable
-#inactivity_timeout: 10m

# Nebula security group configuration
firewall:
@@ -4,7 +4,6 @@ import (
"bufio"
"fmt"
"log"
-"net"

"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/service"
@@ -55,16 +54,16 @@ pki:
cert: /home/rice/Developer/nebula-config/app.crt
key: /home/rice/Developer/nebula-config/app.key
`
-var cfg config.C
+var config config.C
-if err := cfg.LoadString(configStr); err != nil {
+if err := config.LoadString(configStr); err != nil {
return err
}
-svc, err := service.New(&cfg)
+service, err := service.New(&config)
if err != nil {
return err
}

-ln, err := svc.Listen("tcp", ":1234")
+ln, err := service.Listen("tcp", ":1234")
if err != nil {
return err
}
@@ -74,24 +73,16 @@ pki:
log.Printf("accept error: %s", err)
break
}
-defer func(conn net.Conn) {
+defer conn.Close()
-_ = conn.Close()
-}(conn)

log.Printf("got connection")

-_, err = conn.Write([]byte("hello world\n"))
+conn.Write([]byte("hello world\n"))
-if err != nil {
-log.Printf("write error: %s", err)
-}

scanner := bufio.NewScanner(conn)
for scanner.Scan() {
message := scanner.Text()
-_, err = fmt.Fprintf(conn, "echo: %q\n", message)
+fmt.Fprintf(conn, "echo: %q\n", message)
-if err != nil {
-log.Printf("write error: %s", err)
-}
log.Printf("got message %q", message)
}

@@ -101,8 +92,8 @@ pki:
}
}

-_ = svc.Close()
+service.Close()
-if err := svc.Wait(); err != nil {
+if err := service.Wait(); err != nil {
return err
}
return nil
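The hunk above drops the explicit error checks around `conn.Write` and `fmt.Fprintf`. A small sketch of the stricter pattern from the removed lines, applied to a plain `net.Conn` with only the standard library, not tied to the nebula service API:

```go
package main

import (
	"fmt"
	"log"
	"net"
)

// greet writes a banner and reports any write error instead of discarding it.
func greet(conn net.Conn) {
	defer func() {
		_ = conn.Close()
	}()
	if _, err := conn.Write([]byte("hello world\n")); err != nil {
		log.Printf("write error: %s", err)
		return
	}
	if _, err := fmt.Fprintf(conn, "echo: %q\n", "hi"); err != nil {
		log.Printf("write error: %s", err)
	}
}

func main() {
	c1, c2 := net.Pipe()
	go greet(c1)
	buf := make([]byte, 64)
	n, _ := c2.Read(buf)
	fmt.Print(string(buf[:n]))
}
```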
138 examples/quickstart-vagrant/README.md Normal file
@@ -0,0 +1,138 @@
# Quickstart Guide

This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.

## Creating the virtualenv for ansible

Within the `quickstart/` directory, do the following

```
# make a virtual environment
virtualenv venv

# get into the virtualenv
source venv/bin/activate

# install ansible
pip install -r requirements.yml
```

## Bringing up the vagrant environment

The Vagrant environment uses the `vagrant-hostmanager` plugin.

To install, run

```
vagrant plugin install vagrant-hostmanager
```

All hosts within the Vagrantfile are brought up with

`vagrant up`

Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running

`ansible-playbook playbook.yml -i inventory -u vagrant`

## Testing within the vagrant env

Once the ansible run is done, hop onto a vagrant box

`vagrant ssh generic1.vagrant`

or specifically

`ssh vagrant@<ip-address-in-vagrant-file>` (password for the vagrant user on the boxes is `vagrant`)

A quick test once the vagrant boxes are up is to ping from `generic1.vagrant` to `generic2.vagrant` using their respective nebula ip addresses.

```
vagrant@generic1:~$ ping 10.168.91.220
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
```

You can further verify that the allowed nebula firewall rules work by ssh'ing from one generic box to the other.

`ssh vagrant@<nebula-ip-address>` (password for the vagrant user on the boxes is `vagrant`)

See `/etc/nebula/config.yml` on a box for firewall rules.

To see full handshakes and hostmaps, change the logging config in `/etc/nebula/config.yml` on the vagrant boxes from info to debug.

You can watch nebula logs by running

```
sudo journalctl -fu nebula
```

Refer to the nebula src code directory's README for further instructions on configuring nebula.

## Troubleshooting

### Is nebula up and running?

Run

```
ifconfig
```

and verify that it shows an interface with the name `nebula1` being up.

```
vagrant@generic1:~$ ifconfig nebula1
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1300
        inet 10.168.91.210  netmask 255.128.0.0  destination 10.168.91.210
        inet6 fe80::aeaf:b105:e6dc:936c  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 2  bytes 168 (168.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11  bytes 600 (600.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
```

### Connectivity

Are you able to ping other boxes on the private nebula network?

The following are the private nebula ip addresses of the vagrant env

```
generic1.vagrant    [nebula_ip] 10.168.91.210
generic2.vagrant    [nebula_ip] 10.168.91.220
lighthouse1.vagrant [nebula_ip] 10.168.91.230
```

Try pinging generic1.vagrant to and from any other box using its nebula ip above.

Double check the nebula firewall rules under /etc/nebula/config.yml to make sure that connectivity is allowed for your use-case if on a specific port.

```
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
    - proto: any
      port: 53
      host: any

  outbound:
    - proto: any
      port: any
      host: any
```
40 examples/quickstart-vagrant/Vagrantfile vendored Normal file
@@ -0,0 +1,40 @@
Vagrant.require_version ">= 2.2.6"

nodes = [
  { :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
]

Vagrant.configure("2") do |config|

  config.ssh.insert_key = false

  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.enable :apt
  else
    printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n")
  end

  if Vagrant.has_plugin?('vagrant-hostmanager')
    config.hostmanager.enabled = true
    config.hostmanager.manage_host = true
    config.hostmanager.include_offline = true
  else
    config.vagrant.plugins = "vagrant-hostmanager"
  end

  nodes.each do |node|
    config.vm.define node[:hostname] do |node_config|
      node_config.vm.box = node[:box]
      node_config.vm.hostname = node[:hostname]
      node_config.vm.network :private_network, ip: node[:ip]
      node_config.vm.provider :virtualbox do |vb|
        vb.memory = node[:ram]
        vb.cpus = node[:cpus]
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
      end
    end
  end
end
4 examples/quickstart-vagrant/ansible/ansible.cfg Normal file
@@ -0,0 +1,4 @@
[defaults]
host_key_checking = False
private_key_file = ~/.vagrant.d/insecure_private_key
become = yes
@@ -0,0 +1,21 @@
#!/usr/bin/python


class FilterModule(object):
    def filters(self):
        return {
            'to_nebula_ip': self.to_nebula_ip,
            'map_to_nebula_ips': self.map_to_nebula_ips,
        }

    def to_nebula_ip(self, ip_str):
        ip_list = list(map(int, ip_str.split(".")))
        ip_list[0] = 10
        ip_list[1] = 168
        ip = '.'.join(map(str, ip_list))
        return ip

    def map_to_nebula_ips(self, ip_strs):
        ip_list = [ self.to_nebula_ip(ip_str) for ip_str in ip_strs ]
        ips = ', '.join(ip_list)
        return ips
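The `to_nebula_ip` filter above keeps the last two octets of a host's Vagrant private-network address and forces the first two octets to `10.168`, which is how the role derives each box's nebula address from the Vagrantfile IPs. A rough, stand-alone Go sketch of the same mapping (illustrative only; the playbook itself uses the Python filter plugin above):

```go
package main

import (
	"fmt"
	"strings"
)

// toNebulaIP mirrors the Ansible to_nebula_ip filter: keep the last two
// octets of the Vagrant address and force the first two to 10.168.
// Illustrative sketch only, not part of the role.
func toNebulaIP(ip string) string {
	parts := strings.Split(ip, ".")
	parts[0] = "10"
	parts[1] = "168"
	return strings.Join(parts, ".")
}

func main() {
	// 172.11.91.210 (generic1.vagrant in the Vagrantfile) becomes 10.168.91.210
	fmt.Println(toNebulaIP("172.11.91.210"))
}
```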
11 examples/quickstart-vagrant/ansible/inventory Normal file
@@ -0,0 +1,11 @@
[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant

[generic]
generic1.vagrant
generic2.vagrant

[lighthouse]
lighthouse1.vagrant
23 examples/quickstart-vagrant/ansible/playbook.yml Normal file
@@ -0,0 +1,23 @@
---
- name: test connection to vagrant boxes
  hosts: all
  tasks:
    - debug: msg=ok

- name: build nebula binaries locally
  connection: local
  hosts: localhost
  tasks:
    - command: chdir=../../../ make build/linux-amd64/"{{ item }}"
      with_items:
        - nebula
        - nebula-cert
      tags:
        - build-nebula

- name: install nebula on all vagrant hosts
  hosts: all
  become: yes
  gather_facts: yes
  roles:
    - nebula
@@ -0,0 +1,3 @@
---
# defaults file for nebula
nebula_config_directory: "/etc/nebula/"
@@ -0,0 +1,14 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,5 @@
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV
KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN
-----END NEBULA CERTIFICATE-----
@@ -0,0 +1,4 @@
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVA==
-----END NEBULA ED25519 PRIVATE KEY-----
@@ -0,0 +1,5 @@
---
# handlers file for nebula

- name: restart nebula
  service: name=nebula state=restarted
@@ -0,0 +1,62 @@
---
# tasks file for nebula

- name: get the vagrant network interface and set fact
  set_fact:
    vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}"
  tags:
    - nebula-conf

- name: install built nebula binary
  copy: src="../../../../../build/linux-amd64/{{ item }}" dest="/usr/local/bin" mode=0755
  with_items:
    - nebula
    - nebula-cert

- name: create nebula config directory
  file: path="{{ nebula_config_directory }}" state=directory mode=0755

- name: temporarily copy over root.crt and root.key to sign
  copy: src={{ item }} dest=/opt/{{ item }}
  with_items:
    - vagrant-test-ca.key
    - vagrant-test-ca.crt

- name: remove previously signed host certificate
  file: dest=/etc/nebula/{{ item }} state=absent
  with_items:
    - host.crt
    - host.key

- name: sign using the root key
  command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key

- name: remove root.key used to sign
  file: dest=/opt/{{ item }} state=absent
  with_items:
    - vagrant-test-ca.key

- name: write the content of the trusted ca certificate
  copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt"
  notify: restart nebula

- name: Create config directory
  file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory

- name: nebula config
  template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root
  notify: restart nebula
  tags:
    - nebula-conf

- name: nebula systemd
  copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root
  register: addconf
  notify: restart nebula

- name: maybe reload systemd
  shell: systemctl daemon-reload
  when: addconf.changed

- name: nebula running
  service: name="nebula" state=started enabled=yes
@@ -0,0 +1,85 @@
pki:
  ca: /etc/nebula/vagrant-test-ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key

# Port Nebula will be listening on
listen:
  host: 0.0.0.0
  port: 4242

# sshd can expose informational and administrative functions via ssh
sshd:
  # Toggles the feature
  enabled: true
  # Host and port to listen on
  listen: 127.0.0.1:2222
  # A file containing the ssh host private key to use
  host_key: /etc/ssh/ssh_host_ed25519_key
  # A file containing a list of authorized public keys
  authorized_users:
{% for user in nebula_users %}
    - user: {{ user.name }}
      keys:
{% for key in user.ssh_auth_keys %}
        - "{{ key }}"
{% endfor %}
{% endfor %}

local_range: 10.168.0.0/16

static_host_map:
  # lighthouse
  {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address']}}:4242"]

default_route: "0.0.0.0"

lighthouse:
{% if 'lighthouse' in group_names %}
  am_lighthouse: true
  serve_dns: true
{% else %}
  am_lighthouse: false
{% endif %}
  interval: 60
{% if 'generic' in group_names %}
  hosts:
    - {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}
{% endif %}

# Configure the private interface
tun:
  dev: nebula1
  # Sets MTU of the tun dev.
  # MTU of the tun must be smaller than the MTU of the eth0 interface
  mtu: 1300

# TODO
# Configure logging level
logging:
  level: info
  format: json

firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
{% if "lighthouse" in groups %}
    - proto: any
      port: 53
      host: any
{% endif %}

  outbound:
    - proto: any
      port: any
      host: any
@@ -0,0 +1,7 @@
---
# vars file for nebula

nebula_users:
  - name: user1
    ssh_auth_keys:
      - "ed25519 place-your-ssh-public-key-here"
1 examples/quickstart-vagrant/requirements.yml Normal file
@@ -0,0 +1 @@
ansible
@@ -1,35 +0,0 @@
#!/sbin/openrc-run
#
# nebula service for open-rc systems

extra_commands="checkconfig"

: ${NEBULA_CONFDIR:=${RC_PREFIX%/}/etc/nebula}
: ${NEBULA_CONFIG:=${NEBULA_CONFDIR}/config.yml}
: ${NEBULA_BINARY:=${NEBULA_BINARY}${RC_PREFIX%/}/usr/local/sbin/nebula}

command="${NEBULA_BINARY}"
command_args="${NEBULA_OPTS} -config ${NEBULA_CONFIG}"

supervisor="supervise-daemon"

description="A scalable overlay networking tool with a focus on performance, simplicity and security"

required_dirs="${NEBULA_CONFDIR}"
required_files="${NEBULA_CONFIG}"

checkconfig() {
	"${command}" -test ${command_args} || return 1
}

start_pre() {
	if [ "${RC_CMD}" != "restart" ] ; then
		checkconfig || return $?
	fi
}

stop_pre() {
	if [ "${RC_CMD}" = "restart" ] ; then
		checkconfig || return $?
	fi
}
165 firewall.go
@@ -2,31 +2,37 @@ package nebula

import (
	"crypto/sha256"
+	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"hash/fnv"
-	"net/netip"
+	"net"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

-	"github.com/gaissmai/bart"
	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
+	"github.com/slackhq/nebula/cidr"
	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/firewall"
)

+const tcpACK = 0x10
+const tcpFIN = 0x01
+
type FirewallInterface interface {
-	AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error
+	AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error
}

type conn struct {
	Expires time.Time // Time when this conntrack entry will expire
+	Sent    time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
+	Seq     uint32    // If tcp rtt tracking is enabled this will be the seq we are looking for an ack

	// record why the original connection passed the firewall, so we can re-validate
	// after ruleset changes. Note, rulesVersion is a uint16 so that these two
@@ -52,14 +58,16 @@ type Firewall struct {
	DefaultTimeout time.Duration //linux: 600s

	// Used to ensure we don't emit local packets for ips we don't own
-	localIps     *bart.Table[struct{}]
+	localIps     *cidr.Tree4[struct{}]
-	assignedCIDR netip.Prefix
+	assignedCIDR *net.IPNet
	hasSubnets   bool

	rules        string
	rulesVersion uint16

	defaultLocalCIDRAny bool
+	trackTCPRTT         bool
+	metricTCPRTT        metrics.Histogram
	incomingMetrics     firewallMetrics
	outgoingMetrics     firewallMetrics

@@ -108,7 +116,7 @@ type FirewallRule struct {
	Any    *firewallLocalCIDR
	Hosts  map[string]*firewallLocalCIDR
	Groups []*firewallGroups
-	CIDR   *bart.Table[*firewallLocalCIDR]
+	CIDR   *cidr.Tree4[*firewallLocalCIDR]
}

type firewallGroups struct {
@@ -122,7 +130,7 @@ type firewallPort map[int32]*FirewallCA

type firewallLocalCIDR struct {
	Any       bool
-	LocalCIDR *bart.Table[struct{}]
+	LocalCIDR *cidr.Tree4[struct{}]
}

// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
@@ -144,28 +152,20 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
		max = defaultTimeout
	}

-	localIps := new(bart.Table[struct{}])
+	localIps := cidr.NewTree4[struct{}]()
-	var assignedCIDR netip.Prefix
+	var assignedCIDR *net.IPNet
-	var assignedSet bool
	for _, ip := range c.Details.Ips {
-		//TODO: IPV6-WORK the unmap is a bit unfortunate
-		nip, _ := netip.AddrFromSlice(ip.IP)
-		nip = nip.Unmap()
-		nprefix := netip.PrefixFrom(nip, nip.BitLen())
-		localIps.Insert(nprefix, struct{}{})
+		ipNet := &net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}
+		localIps.AddCIDR(ipNet, struct{}{})

-		if !assignedSet {
+		if assignedCIDR == nil {
			// Only grabbing the first one in the cert since any more than that currently has undefined behavior
-			assignedCIDR = nprefix
+			assignedCIDR = ipNet
-			assignedSet = true
		}
	}

	for _, n := range c.Details.Subnets {
-		nip, _ := netip.AddrFromSlice(n.IP)
-		ones, _ := n.Mask.Size()
-		nip = nip.Unmap()
-		localIps.Insert(netip.PrefixFrom(nip, ones), struct{}{})
+		localIps.AddCIDR(n, struct{}{})
	}

	return &Firewall{
@@ -183,6 +183,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
		hasSubnets: len(c.Details.Subnets) > 0,
		l:          l,

+		metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
		incomingMetrics: firewallMetrics{
			droppedLocalIP:  metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
			droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
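The `network.tcp.rtt` histogram registered above is fed nanosecond samples by `checkTCPRTT` later in this diff. A minimal stand-alone sketch of that go-metrics pattern, assuming only the `github.com/rcrowley/go-metrics` package already imported by this file:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Register a histogram backed by an exponentially decaying sample,
	// using the same constructor arguments as the diff above.
	rtt := metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015))

	// Record one fake round trip; nebula records time.Since(c.Sent).Nanoseconds().
	start := time.Now()
	time.Sleep(2 * time.Millisecond)
	rtt.Update(time.Since(start).Nanoseconds())

	fmt.Println("samples:", rtt.Count(), "mean ns:", rtt.Mean())
}
```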
@@ -245,15 +246,15 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
}

// AddRule properly creates the in memory rule structure for a firewall table.
-func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error {
+func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
	// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
	// https://github.com/golang/go/issues/14131
	sIp := ""
-	if ip.IsValid() {
+	if ip != nil {
		sIp = ip.String()
	}
	lIp := ""
-	if localIp.IsValid() {
+	if localIp != nil {
		lIp = localIp.String()
	}

@@ -390,17 +391,17 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
		return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
	}

-	var cidr netip.Prefix
+	var cidr *net.IPNet
	if r.Cidr != "" {
-		cidr, err = netip.ParsePrefix(r.Cidr)
+		_, cidr, err = net.ParseCIDR(r.Cidr)
		if err != nil {
			return fmt.Errorf("%s rule #%v; cidr did not parse; %s", table, i, err)
		}
	}

-	var localCidr netip.Prefix
+	var localCidr *net.IPNet
	if r.LocalCidr != "" {
-		localCidr, err = netip.ParsePrefix(r.LocalCidr)
+		_, localCidr, err = net.ParseCIDR(r.LocalCidr)
		if err != nil {
			return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err)
		}
@@ -421,16 +422,15 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")

// Drop returns an error if the packet should be dropped, explaining why. It
// returns nil if the packet should not be dropped.
-func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
+func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
	// Check if we spoke to this tuple, if we did then allow this packet
-	if f.inConns(fp, h, caPool, localCache) {
+	if f.inConns(packet, fp, incoming, h, caPool, localCache) {
		return nil
	}

	// Make sure remote address matches nebula certificate
	if remoteCidr := h.remoteCidr; remoteCidr != nil {
-		//TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
-		_, ok := remoteCidr.Lookup(fp.RemoteIP)
+		ok, _ := remoteCidr.Contains(fp.RemoteIP)
		if !ok {
			f.metrics(incoming).droppedRemoteIP.Inc(1)
			return ErrInvalidRemoteIP
@@ -444,8 +444,7 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
	}

	// Make sure we are supposed to be handling this local ip address
-	//TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
-	_, ok := f.localIps.Lookup(fp.LocalIP)
+	ok, _ := f.localIps.Contains(fp.LocalIP)
	if !ok {
		f.metrics(incoming).droppedLocalIP.Inc(1)
		return ErrInvalidLocalIP
@@ -463,7 +462,7 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
	}

	// We always want to conntrack since it is a faster operation
-	f.addConn(fp, incoming)
+	f.addConn(packet, fp, incoming)

	return nil
}
@@ -492,7 +491,7 @@ func (f *Firewall) EmitStats() {
	metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
}

-func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
+func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
	if localCache != nil {
		if _, ok := localCache[fp]; ok {
			return true
@@ -552,6 +551,11 @@ func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaC
	switch fp.Protocol {
	case firewall.ProtoTCP:
		c.Expires = time.Now().Add(f.TCPTimeout)
+		if incoming {
+			f.checkTCPRTT(c, packet)
+		} else {
+			setTCPRTTTracking(c, packet)
+		}
	case firewall.ProtoUDP:
		c.Expires = time.Now().Add(f.UDPTimeout)
	default:
@@ -567,13 +571,16 @@ func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaC
	return true
}

-func (f *Firewall) addConn(fp firewall.Packet, incoming bool) {
+func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
	var timeout time.Duration
	c := &conn{}

	switch fp.Protocol {
	case firewall.ProtoTCP:
		timeout = f.TCPTimeout
+		if !incoming {
+			setTCPRTTTracking(c, packet)
+		}
	case firewall.ProtoUDP:
		timeout = f.UDPTimeout
	default:
@@ -599,6 +606,7 @@ func (f *Firewall) addConn(fp firewall.Packet, incoming bool) {
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
// Caller must own the connMutex lock!
func (f *Firewall) evict(p firewall.Packet) {
+	//TODO: report a stat if the tcp rtt tracking was never resolved?
	// Are we still tracking this conn?
	conntrack := f.Conntrack
	t, ok := conntrack.Conns[p]
@@ -642,7 +650,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
		return false
	}

-func (fp firewallPort) addRule(f *Firewall, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error {
+func (fp firewallPort) addRule(f *Firewall, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
	if startPort > endPort {
		return fmt.Errorf("start port was lower than end port")
	}
@@ -686,12 +694,12 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
	return fp[firewall.PortAny].match(p, c, caPool)
}

-func (fc *FirewallCA) addRule(f *Firewall, groups []string, host string, ip, localIp netip.Prefix, caName, caSha string) error {
+func (fc *FirewallCA) addRule(f *Firewall, groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error {
	fr := func() *FirewallRule {
		return &FirewallRule{
			Hosts:  make(map[string]*firewallLocalCIDR),
			Groups: make([]*firewallGroups, 0),
-			CIDR:   new(bart.Table[*firewallLocalCIDR]),
+			CIDR:   cidr.NewTree4[*firewallLocalCIDR](),
		}
	}

@@ -749,10 +757,10 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
	return fc.CANames[s.Details.Name].match(p, c)
}

-func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
+func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip *net.IPNet, localCIDR *net.IPNet) error {
	flc := func() *firewallLocalCIDR {
		return &firewallLocalCIDR{
-			LocalCIDR: new(bart.Table[struct{}]),
+			LocalCIDR: cidr.NewTree4[struct{}](),
		}
	}

@@ -789,8 +797,8 @@ func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, l
		fr.Hosts[host] = nlc
	}

-	if ip.IsValid() {
+	if ip != nil {
-		nlc, _ := fr.CIDR.Get(ip)
+		_, nlc := fr.CIDR.GetCIDR(ip)
		if nlc == nil {
			nlc = flc()
		}
@@ -798,14 +806,14 @@ func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, l
		if err != nil {
			return err
		}
-		fr.CIDR.Insert(ip, nlc)
+		fr.CIDR.AddCIDR(ip, nlc)
	}

	return nil
}

-func (fr *FirewallRule) isAny(groups []string, host string, ip netip.Prefix) bool {
+func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool {
-	if len(groups) == 0 && host == "" && !ip.IsValid() {
+	if len(groups) == 0 && host == "" && ip == nil {
		return true
	}

@@ -819,7 +827,7 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip netip.Prefix) boo
		return true
	}

-	if ip.IsValid() && ip.Bits() == 0 {
+	if ip != nil && ip.Contains(net.IPv4(0, 0, 0, 0)) {
		return true
	}
@@ -862,28 +870,22 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
		}
	}

-	for _, v := range fr.CIDR.Supernets(netip.PrefixFrom(p.RemoteIP, p.RemoteIP.BitLen())) {
-		if v.match(p, c) {
-			return true
-		}
-	}
-
-	return false
+	return fr.CIDR.EachContains(p.RemoteIP, func(flc *firewallLocalCIDR) bool {
+		return flc.match(p, c)
+	})
}

-func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
+func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp *net.IPNet) error {
-	if !localIp.IsValid() {
+	if localIp == nil || (localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0))) {
		if !f.hasSubnets || f.defaultLocalCIDRAny {
			flc.Any = true
			return nil
		}

		localIp = f.assignedCIDR
-	} else if localIp.Bits() == 0 {
-		flc.Any = true
	}

-	flc.LocalCIDR.Insert(localIp, struct{}{})
+	flc.LocalCIDR.AddCIDR(localIp, struct{}{})
	return nil
}

@@ -896,7 +898,7 @@ func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.NebulaCertificate
		return true
	}

-	_, ok := flc.LocalCIDR.Lookup(p.LocalIP)
+	ok, _ := flc.LocalCIDR.Contains(p.LocalIP)
	return ok
}
@@ -1013,3 +1015,42 @@ func parsePort(s string) (startPort, endPort int32, err error) {
	return
}

+// TODO: write tests for these
+func setTCPRTTTracking(c *conn, p []byte) {
+	if c.Seq != 0 {
+		return
+	}
+
+	ihl := int(p[0]&0x0f) << 2
+
+	// Don't track FIN packets
+	if p[ihl+13]&tcpFIN != 0 {
+		return
+	}
+
+	c.Seq = binary.BigEndian.Uint32(p[ihl+4 : ihl+8])
+	c.Sent = time.Now()
+}
+
+func (f *Firewall) checkTCPRTT(c *conn, p []byte) bool {
+	if c.Seq == 0 {
+		return false
+	}
+
+	ihl := int(p[0]&0x0f) << 2
+	if p[ihl+13]&tcpACK == 0 {
+		return false
+	}
+
+	// Deal with wrap around, signed int cuts the ack window in half
+	// 0 is a bad ack, no data acknowledged
+	// positive number is a bad ack, ack is over half the window away
+	if int32(c.Seq-binary.BigEndian.Uint32(p[ihl+8:ihl+12])) >= 0 {
+		return false
+	}
+
+	f.metricTCPRTT.Update(time.Since(c.Sent).Nanoseconds())
+	c.Seq = 0
+	return true
+}
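The wrap-around comparison in `checkTCPRTT` above is easier to see with numbers: both the tracked sequence number and the incoming acknowledgment are unsigned 32-bit values, so the code subtracts them and reinterprets the difference as a signed `int32`. A negative result means the ack has advanced past the tracked seq (a good ack); zero or a positive result means it has not. A small illustrative sketch of just that check, with made-up numbers:

```go
package main

import "fmt"

// ackCoversSeq reports whether ack acknowledges data beyond seq, using the
// same wrap-around-safe signed comparison as checkTCPRTT above.
func ackCoversSeq(seq, ack uint32) bool {
	return int32(seq-ack) < 0
}

func main() {
	fmt.Println(ackCoversSeq(1000, 1001))       // true: ack is past the tracked seq
	fmt.Println(ackCoversSeq(1000, 1000))       // false: nothing new acknowledged
	fmt.Println(ackCoversSeq(0xFFFFFFF0, 0x10)) // true: still correct across the 2^32 wrap
}
```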
@@ -3,7 +3,8 @@ package firewall
import (
	"encoding/json"
	"fmt"
-	"net/netip"
+
+	"github.com/slackhq/nebula/iputil"
)

type m map[string]interface{}
@@ -19,8 +20,8 @@ const (
)

type Packet struct {
-	LocalIP    netip.Addr
+	LocalIP    iputil.VpnIp
-	RemoteIP   netip.Addr
+	RemoteIP   iputil.VpnIp
	LocalPort  uint16
	RemotePort uint16
	Protocol   uint8
274 firewall_test.go
@@ -2,16 +2,18 @@ package nebula

import (
	"bytes"
+	"encoding/binary"
	"errors"
	"math"
	"net"
-	"net/netip"
	"testing"
	"time"

+	"github.com/rcrowley/go-metrics"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/firewall"
+	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/test"
	"github.com/stretchr/testify/assert"
)
@@ -65,62 +67,59 @@ func TestFirewall_AddRule(t *testing.T) {
	assert.NotNil(t, fw.InRules)
	assert.NotNil(t, fw.OutRules)

-	ti, err := netip.ParsePrefix("1.2.3.4/32")
+	_, ti, _ := net.ParseCIDR("1.2.3.4/32")
-	assert.NoError(t, err)

-	assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", ""))
	// An empty rule is any
	assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
	assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
	assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", ""))
	assert.Nil(t, fw.InRules.UDP[1].Any.Any)
	assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0].Groups, "g1")
	assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", ""))
	assert.Nil(t, fw.InRules.ICMP[1].Any.Any)
	assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
	assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", ""))
	assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
-	_, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
+	ok, _ := fw.OutRules.AnyProto[1].Any.CIDR.GetCIDR(ti)
	assert.True(t, ok)

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
-	_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
+	ok, _ = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.GetCIDR(ti)
	assert.True(t, ok)

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
	assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", "ca-sha"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha"))
	assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
	assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)

	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	anyIp, err := netip.ParsePrefix("0.0.0.0/0")
+	_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
-	assert.NoError(t, err)
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))

-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
	assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)

	// Test error conditions
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", ""))
-	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", ""))
}

func TestFirewall_Drop(t *testing.T) {
@@ -129,8 +128,8 @@ func TestFirewall_Drop(t *testing.T) {
	l.SetOutput(ob)

	p := firewall.Packet{
-		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
@@ -155,53 +154,53 @@ func TestFirewall_Drop(t *testing.T) {
		ConnectionState: &ConnectionState{
			peerCert: &c,
		},
-		vpnIp: netip.MustParseAddr("1.2.3.4"),
+		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h.CreateRemoteCIDR(&c)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
	cp := cert.NewCAPool()

	// Drop outbound
-	assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
+	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
	// Allow inbound
	resetConntrack(fw)
-	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
	// Allow outbound because conntrack
-	assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))

	// test remote mismatch
	oldRemote := p.RemoteIP
-	p.RemoteIP = netip.MustParseAddr("1.2.3.10")
+	p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10))
-	assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
+	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP)
	p.RemoteIP = oldRemote

	// ensure signer doesn't get in the way of group checks
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum"))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad"))
-	assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)

	// test caSha doesn't drop on match
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad"))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum"))
-	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))

	// ensure ca name doesn't get in the way of group checks
	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", ""))
-	assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)

	// test caName doesn't drop on match
	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", ""))
-	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
}

func BenchmarkFirewallTable_match(b *testing.B) {
@@ -210,9 +209,10 @@ func BenchmarkFirewallTable_match(b *testing.B) {
		TCP: firewallPort{},
	}

-	pfix := netip.MustParsePrefix("172.1.1.1/32")
+	_, n, _ := net.ParseCIDR("172.1.1.1/32")
-	_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix, netip.Prefix{}, "", "")
+	goodLocalCIDRIP := iputil.Ip2VpnIp(n.IP)
-	_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix, "", "")
+	_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", n, nil, "", "")
+	_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", nil, n, "", "")
	cp := cert.NewCAPool()

	b.Run("fail on proto", func(b *testing.B) {
@@ -233,9 +233,10 @@ func BenchmarkFirewallTable_match(b *testing.B) {

	b.Run("pass proto, port, fail on local CIDR", func(b *testing.B) {
		c := &cert.NebulaCertificate{}
-		ip := netip.MustParsePrefix("9.254.254.254/32")
+		ip, _, _ := net.ParseCIDR("9.254.254.254/32")
+		lip := iputil.Ip2VpnIp(ip)
		for n := 0; n < b.N; n++ {
-			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip.Addr()}, true, c, cp))
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: lip}, true, c, cp))
		}
	})

@@ -263,7 +264,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
		},
	}
		for n := 0; n < b.N; n++ {
-			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: goodLocalCIDRIP}, true, c, cp))
		}
	})

@@ -287,7 +288,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
		},
	}
		for n := 0; n < b.N; n++ {
-			assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
+			assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: goodLocalCIDRIP}, true, c, cp))
		}
	})
@@ -364,8 +365,8 @@ func TestFirewall_Drop2(t *testing.T) {
	l.SetOutput(ob)

	p := firewall.Packet{
-		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
@@ -388,7 +389,7 @@ func TestFirewall_Drop2(t *testing.T) {
		ConnectionState: &ConnectionState{
			peerCert: &c,
		},
-		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
+		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h.CreateRemoteCIDR(&c)

@@ -407,14 +408,14 @@ func TestFirewall_Drop2(t *testing.T) {
	h1.CreateRemoteCIDR(&c1)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", ""))
	cp := cert.NewCAPool()

	// h1/c1 lacks the proper groups
-	assert.Error(t, fw.Drop(p, true, &h1, cp, nil), ErrNoMatchingRule)
+	assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule)
	// c has the proper groups
	resetConntrack(fw)
-	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
}

func TestFirewall_Drop3(t *testing.T) {
@@ -423,8 +424,8 @@ func TestFirewall_Drop3(t *testing.T) {
	l.SetOutput(ob)

	p := firewall.Packet{
-		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  1,
		RemotePort: 1,
		Protocol:   firewall.ProtoUDP,
@@ -454,7 +455,7 @@ func TestFirewall_Drop3(t *testing.T) {
		ConnectionState: &ConnectionState{
			peerCert: &c1,
		},
-		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
+		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h1.CreateRemoteCIDR(&c1)

@@ -469,7 +470,7 @@ func TestFirewall_Drop3(t *testing.T) {
		ConnectionState: &ConnectionState{
			peerCert: &c2,
		},
-		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
+		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h2.CreateRemoteCIDR(&c2)

@@ -484,23 +485,23 @@ func TestFirewall_Drop3(t *testing.T) {
		ConnectionState: &ConnectionState{
			peerCert: &c3,
		},
-		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
+		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
	}
	h3.CreateRemoteCIDR(&c3)

	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-sha"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha"))
	cp := cert.NewCAPool()

	// c1 should pass because host match
-	assert.NoError(t, fw.Drop(p, true, &h1, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil))
	// c2 should pass because ca sha match
	resetConntrack(fw)
-	assert.NoError(t, fw.Drop(p, true, &h2, cp, nil))
+	assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil))
	// c3 should fail because no match
	resetConntrack(fw)
-	assert.Equal(t, fw.Drop(p, true, &h3, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule)
}

func TestFirewall_DropConntrackReload(t *testing.T) {
@@ -509,8 +510,8 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
	l.SetOutput(ob)

	p := firewall.Packet{
-		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
@@ -535,39 +536,39 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
		ConnectionState: &ConnectionState{
ConnectionState: &ConnectionState{
|
||||||
peerCert: &c,
|
peerCert: &c,
|
||||||
},
|
},
|
||||||
vpnIp: netip.MustParseAddr(ipNet.IP.String()),
|
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||||
}
|
}
|
||||||
h.CreateRemoteCIDR(&c)
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// Drop outbound
|
// Drop outbound
|
||||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
|
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||||
// Allow inbound
|
// Allow inbound
|
||||||
resetConntrack(fw)
|
resetConntrack(fw)
|
||||||
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||||
// Allow outbound because conntrack
|
// Allow outbound because conntrack
|
||||||
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
|
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
|
||||||
|
|
||||||
oldFw := fw
|
oldFw := fw
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", ""))
|
||||||
fw.Conntrack = oldFw.Conntrack
|
fw.Conntrack = oldFw.Conntrack
|
||||||
fw.rulesVersion = oldFw.rulesVersion + 1
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
|
||||||
// Allow outbound because conntrack and new rules allow port 10
|
// Allow outbound because conntrack and new rules allow port 10
|
||||||
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
|
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
|
||||||
|
|
||||||
oldFw = fw
|
oldFw = fw
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", ""))
|
||||||
fw.Conntrack = oldFw.Conntrack
|
fw.Conntrack = oldFw.Conntrack
|
||||||
fw.rulesVersion = oldFw.rulesVersion + 1
|
fw.rulesVersion = oldFw.rulesVersion + 1
|
||||||
|
|
||||||
// Drop outbound because conntrack doesn't match new ruleset
|
// Drop outbound because conntrack doesn't match new ruleset
|
||||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
|
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkLookup(b *testing.B) {
|
func BenchmarkLookup(b *testing.B) {
|
||||||
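Aside: the conntrack reload assertions above rest on one idea: an established flow is only trusted while its conntrack entry still matches the firewall's current rulesVersion; after a reload it is re-evaluated against the new rules exactly once. The sketch below is not Nebula's implementation, just a minimal, self-contained illustration of that bookkeeping; the flow, entry, and fw types and the allowedPorts map are invented for the example.

package main

import "fmt"

type flow struct{ localPort, remotePort uint16 }

type entry struct{ rulesVersion uint16 }

type fw struct {
	rulesVersion uint16
	allowedPorts map[uint16]bool // stand-in for the active ruleset, keyed by local port
	conntrack    map[flow]*entry
}

// drop mirrors the shape of the test above: a tracked flow skips rule checks
// while its entry matches the current rules version; after a reload the flow
// is checked against the new rules once and the entry is updated or dropped.
func (f *fw) drop(fl flow) bool {
	e, ok := f.conntrack[fl]
	if ok && e.rulesVersion == f.rulesVersion {
		return false // established and still valid under this ruleset
	}
	if !f.allowedPorts[fl.localPort] {
		return true // no matching rule in the current ruleset
	}
	if !ok {
		e = &entry{}
		f.conntrack[fl] = e
	}
	e.rulesVersion = f.rulesVersion // remember which ruleset admitted it
	return false
}

func main() {
	f := &fw{rulesVersion: 1, allowedPorts: map[uint16]bool{10: true}, conntrack: map[flow]*entry{}}
	p := flow{localPort: 10, remotePort: 90}
	fmt.Println(f.drop(p)) // false: rule allows port 10, entry created

	f.rulesVersion++ // "reload" with rules that still allow port 10
	fmt.Println(f.drop(p)) // false: re-checked once against the new rules

	f.rulesVersion++ // "reload" with rules that only allow port 11
	f.allowedPorts = map[uint16]bool{11: true}
	fmt.Println(f.drop(p)) // true: conntrack entry no longer matches the new ruleset
}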
@@ -726,13 +727,13 @@ func TestNewFirewallFromConfig(t *testing.T) {
conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, c, conf)
-assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
+assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")

// Test local_cidr parse error
conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, c, conf)
-assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
+assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh")

// Test both group and groups
conf = config.NewC(l)
@@ -748,78 +749,78 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
mf := &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)

// Test adding udp rule
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)

// Test adding icmp rule
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)

// Test adding any rule
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)

// Test adding rule with cidr
-cidr := netip.MustParsePrefix("10.0.0.0/8")
+cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall)

// Test adding rule with local_cidr
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall)

// Test adding rule with ca_sha
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caSha: "12312313123"}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall)

// Test adding rule with ca_name
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caName: "root01"}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall)

// Test single group
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)

// Test single groups
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)

// Test multiple AND groups
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
+assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall)

// Test Add error
conf = config.NewC(l)
@@ -829,6 +830,97 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
}

+func TestTCPRTTTracking(t *testing.T) {
+b := make([]byte, 200)
+
+// Max ip IHL (60 bytes) and tcp IHL (60 bytes)
+b[0] = 15
+b[60+12] = 15 << 4
+f := Firewall{
+metricTCPRTT: metrics.GetOrRegisterHistogram("nope", nil, metrics.NewExpDecaySample(1028, 0.015)),
+}
+
+// Set SEQ to 1
+binary.BigEndian.PutUint32(b[60+4:60+8], 1)
+
+c := &conn{}
+setTCPRTTTracking(c, b)
+assert.Equal(t, uint32(1), c.Seq)
+
+// Bad ack - no ack flag
+binary.BigEndian.PutUint32(b[60+8:60+12], 80)
+assert.False(t, f.checkTCPRTT(c, b))
+
+// Bad ack, number is too low
+binary.BigEndian.PutUint32(b[60+8:60+12], 0)
+b[60+13] = uint8(0x10)
+assert.False(t, f.checkTCPRTT(c, b))
+
+// Good ack
+binary.BigEndian.PutUint32(b[60+8:60+12], 80)
+assert.True(t, f.checkTCPRTT(c, b))
+assert.Equal(t, uint32(0), c.Seq)
+
+// Set SEQ to 1
+binary.BigEndian.PutUint32(b[60+4:60+8], 1)
+c = &conn{}
+setTCPRTTTracking(c, b)
+assert.Equal(t, uint32(1), c.Seq)
+
+// Good acks
+binary.BigEndian.PutUint32(b[60+8:60+12], 81)
+assert.True(t, f.checkTCPRTT(c, b))
+assert.Equal(t, uint32(0), c.Seq)
+
+// Set SEQ to max uint32 - 20
+binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)-20)
+c = &conn{}
+setTCPRTTTracking(c, b)
+assert.Equal(t, ^uint32(0)-20, c.Seq)
+
+// Good acks
+binary.BigEndian.PutUint32(b[60+8:60+12], 81)
+assert.True(t, f.checkTCPRTT(c, b))
+assert.Equal(t, uint32(0), c.Seq)
+
+// Set SEQ to max uint32 / 2
+binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)/2)
+c = &conn{}
+setTCPRTTTracking(c, b)
+assert.Equal(t, ^uint32(0)/2, c.Seq)
+
+// Below
+binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2-1)
+assert.False(t, f.checkTCPRTT(c, b))
+assert.Equal(t, ^uint32(0)/2, c.Seq)
+
+// Halfway below
+binary.BigEndian.PutUint32(b[60+8:60+12], uint32(0))
+assert.False(t, f.checkTCPRTT(c, b))
+assert.Equal(t, ^uint32(0)/2, c.Seq)
+
+// Halfway above is ok
+binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0))
+assert.True(t, f.checkTCPRTT(c, b))
+assert.Equal(t, uint32(0), c.Seq)
+
+// Set SEQ to max uint32
+binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0))
+c = &conn{}
+setTCPRTTTracking(c, b)
+assert.Equal(t, ^uint32(0), c.Seq)
+
+// Halfway + 1 above
+binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2+1)
+assert.False(t, f.checkTCPRTT(c, b))
+assert.Equal(t, ^uint32(0), c.Seq)
+
+// Halfway above
+binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2)
+assert.True(t, f.checkTCPRTT(c, b))
+assert.Equal(t, uint32(0), c.Seq)
+}
+
func TestFirewall_convertRule(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{}
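Aside: the TestTCPRTTTracking block added above probes how an ACK is compared against a tracked SEQ once the 32-bit sequence space wraps (the "Halfway above / Halfway below" cases). The helper below is not Nebula's checkTCPRTT; it is a self-contained sketch of the wraparound-aware comparison those cases describe, with the example values taken from the test.

package main

import "fmt"

// ackAfterSeq reports whether ack acknowledges seq under serial-number style
// arithmetic: an ack up to half the uint32 space ahead of seq counts as
// forward progress, anything further is treated as old or wrapped the wrong
// way. Flag checks and header parsing are deliberately left out.
func ackAfterSeq(seq, ack uint32) bool {
	return ack-seq <= 1<<31
}

func main() {
	max := ^uint32(0)
	half := max / 2
	fmt.Println(ackAfterSeq(1, 80))        // true: plain forward progress
	fmt.Println(ackAfterSeq(1, 0))         // false: ack number is too low
	fmt.Println(ackAfterSeq(half, half-1)) // false: below
	fmt.Println(ackAfterSeq(half, 0))      // false: halfway below
	fmt.Println(ackAfterSeq(half, max))    // true: halfway above is ok
	fmt.Println(ackAfterSeq(max, half+1))  // false: halfway + 1 above
	fmt.Println(ackAfterSeq(max, half))    // true: wraps around and still counts as above
}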
@@ -872,8 +964,8 @@ type addRuleCall struct {
endPort int32
groups []string
host string
-ip netip.Prefix
+ip *net.IPNet
-localIp netip.Prefix
+localIp *net.IPNet
caName string
caSha string
}
@@ -883,7 +975,7 @@ type mockFirewall struct {
nextCallReturn error
}

-func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip netip.Prefix, localIp netip.Prefix, caName string, caSha string) error {
+func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
mf.lastCall = addRuleCall{
incoming: incoming,
proto: proto,
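Aside: the addRuleCall / AddRule change just above (netip.Prefix versus *net.IPNet, with netip.Prefix{} standing in where the older code used nil) is the CIDR representation difference that runs through the whole firewall_test.go portion of this diff. The snippet below only illustrates how the two representations map onto each other; it is not code from either branch.

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// The older API passes rule CIDRs as *net.IPNet; the newer one uses the
	// value type netip.Prefix.
	old := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}

	// *net.IPNet -> netip.Prefix
	addr, _ := netip.AddrFromSlice(old.IP)
	ones, _ := old.Mask.Size()
	p := netip.PrefixFrom(addr.Unmap(), ones) // Unmap guards against 4-in-6 addresses
	fmt.Println(p) // 10.0.0.0/8

	// netip.Prefix -> *net.IPNet
	back := &net.IPNet{
		IP:   p.Addr().AsSlice(),
		Mask: net.CIDRMask(p.Bits(), p.Addr().BitLen()),
	}
	fmt.Println(back) // 10.0.0.0/8
}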
34 go.mod
@@ -1,6 +1,6 @@
module github.com/slackhq/nebula

-go 1.24.0
+go 1.20

require (
dario.cat/mergo v1.0.0
@@ -8,45 +8,45 @@ require (
github.com/armon/go-radix v1.0.0
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.1.0
-github.com/gaissmai/bart v0.25.1
github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19
github.com/kardianos/service v1.2.2
-github.com/miekg/dns v1.1.61
+github.com/miekg/dns v1.1.58
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
-github.com/prometheus/client_golang v1.19.1
+github.com/prometheus/client_golang v1.18.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/stretchr/testify v1.9.0
github.com/vishvananda/netlink v1.2.1-beta.2
-golang.org/x/crypto v0.43.0
+golang.org/x/crypto v0.21.0
-golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
+golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
-golang.org/x/net v0.45.0
+golang.org/x/net v0.22.0
-golang.org/x/sync v0.8.0
+golang.org/x/sync v0.6.0
-golang.org/x/sys v0.37.0
+golang.org/x/sys v0.18.0
-golang.org/x/term v0.36.0
+golang.org/x/term v0.18.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
golang.zx2c4.com/wireguard/windows v0.5.3
-google.golang.org/protobuf v1.34.2
+google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v2 v2.4.0
-gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
+gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f
)

require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
-github.com/google/btree v1.1.2 // indirect
+github.com/google/btree v1.0.1 // indirect
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
-github.com/prometheus/common v0.48.0 // indirect
+github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
-golang.org/x/mod v0.18.0 // indirect
+golang.org/x/mod v0.14.0 // indirect
-golang.org/x/time v0.5.0 // indirect
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
-golang.org/x/tools v0.22.0 // indirect
+golang.org/x/tools v0.17.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
69 go.sum
@@ -24,8 +24,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
-github.com/gaissmai/bart v0.25.1 h1:ctl4nH/za+trcbfmZz9uO9xGBZD684GWaDMsjMSo1l8=
-github.com/gaissmai/bart v0.25.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -46,15 +44,14 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
@@ -74,13 +71,14 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
+github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
+github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -98,8 +96,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -108,8 +106,8 @@ github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -119,7 +117,6 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -149,16 +146,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
-golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
+golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
-golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -169,8 +166,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -178,8 +175,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -197,23 +194,23 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -232,8 +229,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -249,5 +246,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe h1:fre4i6mv4iBuz5lCMOzHD1rH1ljqHWSICFmZRbbgp3g=
+gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f h1:8GE2MRjGiFmfpon8dekPI08jEuNMQzSffVHgdupcO4E=
-gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
+gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f/go.mod h1:pzr6sy8gDLfVmDAg8OYrlKvGEHw5C3PGTiBXBTCx76Q=
@@ -1,12 +1,13 @@
 package nebula

 import (
-"net/netip"
 "time"

 "github.com/flynn/noise"
 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
+"github.com/slackhq/nebula/udp"
 )

 // NOISE IX Handshakes
@@ -45,6 +46,7 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
 }

 h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
+ci.messageCounter.Add(1)

 msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
 if err != nil {
@@ -62,7 +64,7 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
 return true
 }

-func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
+func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
 certState := f.pki.GetCertState()
 ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)
 // Mark packet 1 as seen so it doesn't show up as missed
@@ -88,36 +90,17 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet

 remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
 if err != nil {
-e := f.l.WithError(err).WithField("udpAddr", addr).
+f.l.WithError(err).WithField("udpAddr", addr).
-WithField("handshake", m{"stage": 1, "style": "ix_psk0"})
+WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
+Info("Invalid certificate from host")
-if f.l.Level > logrus.DebugLevel {
-e = e.WithField("cert", remoteCert)
-}
-e.Info("Invalid certificate from host")
 return
 }
+vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
-vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
-if !ok {
-e := f.l.WithError(err).WithField("udpAddr", addr).
-WithField("handshake", m{"stage": 1, "style": "ix_psk0"})
-if f.l.Level > logrus.DebugLevel {
-e = e.WithField("cert", remoteCert)
-}
-e.Info("Invalid vpn ip from host")
-return
-}
-vpnIp = vpnIp.Unmap()
 certName := remoteCert.Details.Name
 fingerprint, _ := remoteCert.Sha256Sum()
 issuer := remoteCert.Details.Issuer

-if vpnIp == f.myVpnNet.Addr() {
+if vpnIp == f.myVpnIp {
 f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
 WithField("certName", certName).
 WithField("fingerprint", fingerprint).
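The hunk above is where the two branches disagree on how the peer's vpn address is pulled out of its certificate: one side converts the cert's first IP into a netip.Addr with AddrFromSlice and unmaps the IPv4-in-IPv6 form, the other packs it into the legacy iputil.VpnIp integer. A minimal, illustrative sketch of the two conversions, assuming a cert-style net.IP as input; the uint32 packing is an assumption that mirrors how the relay hunks later use uint32(vpnIp) for the same value, not the repo's actual helper:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"net"
    	"net/netip"
    )

    func main() {
    	certIP := net.ParseIP("172.16.0.5") // stand-in for remoteCert.Details.Ips[0].IP

    	// netip branch: validate, then drop the ::ffff: IPv4-in-IPv6 mapping.
    	addr, ok := netip.AddrFromSlice(certIP)
    	if !ok {
    		fmt.Println("invalid vpn ip from host")
    		return
    	}
    	addr = addr.Unmap()

    	// legacy branch (assumed equivalent): the IPv4 address as a big-endian uint32.
    	vpnIp := binary.BigEndian.Uint32(certIP.To4())

    	fmt.Println(addr, vpnIp)
    }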
@@ -126,8 +109,8 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
 return
 }

-if addr.IsValid() {
+if addr != nil {
-if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.Addr()) {
+if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) {
 f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
 return
 }
@@ -151,8 +134,8 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
 HandshakePacket: make(map[uint8][]byte, 0),
 lastHandshakeTime: hs.Details.Time,
 relayState: RelayState{
-relays: nil,
+relays: map[iputil.VpnIp]struct{}{},
-relayForByIp: map[netip.Addr]*Relay{},
+relayForByIp: map[iputil.VpnIp]*Relay{},
 relayForByIdx: map[uint32]*Relay{},
 },
 }
@@ -231,7 +214,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet

 msg = existing.HandshakePacket[2]
 f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
-if addr.IsValid() {
+if addr != nil {
 err := f.outside.WriteTo(msg, addr)
 if err != nil {
 f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
@@ -297,7 +280,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet

 // Do the send
 f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
-if addr.IsValid() {
+if addr != nil {
 err = f.outside.WriteTo(msg, addr)
 if err != nil {
 f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
@@ -322,9 +305,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
 return
 }
 hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
-// I successfully received a handshake. Just in case I marked this tunnel as 'Disestablished', ensure
-// it's correctly marked as working.
-via.relayHI.relayState.UpdateRelayForByIdxState(via.remoteIdx, Established)
 f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
 f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
 WithField("certName", certName).
@@ -335,14 +315,14 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
 Info("Handshake message sent")
 }

-f.connectionManager.AddTrafficWatch(hostinfo)
+f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
+hostinfo.ConnectionState.messageCounter.Store(2)
 hostinfo.remotes.ResetBlockedRemotes()

 return
 }

-func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
+func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
 if hh == nil {
 // Nothing here to tear down, got a bogus stage 2 packet
 return true
@@ -352,8 +332,8 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
 defer hh.Unlock()

 hostinfo := hh.hostinfo
-if addr.IsValid() {
+if addr != nil {
-if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.Addr()) {
+if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
 f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
 return false
 }
@@ -392,33 +372,15 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha

 remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
 if err != nil {
-e := f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
+f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
-WithField("handshake", m{"stage": 2, "style": "ix_psk0"})
+WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
+Error("Invalid certificate from host")
-if f.l.Level > logrus.DebugLevel {
-e = e.WithField("cert", remoteCert)
-}
-e.Error("Invalid certificate from host")

 // The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
 return true
 }

-vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
+vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
-if !ok {
-e := f.l.WithError(err).WithField("udpAddr", addr).
-WithField("handshake", m{"stage": 2, "style": "ix_psk0"})
-if f.l.Level > logrus.DebugLevel {
-e = e.WithField("cert", remoteCert)
-}
-e.Info("Invalid vpn ip from host")
-return true
-}
-vpnIp = vpnIp.Unmap()
 certName := remoteCert.Details.Name
 fingerprint, _ := remoteCert.Sha256Sum()
 issuer := remoteCert.Details.Issuer
@@ -444,7 +406,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
 hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)

 f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
-WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.GetPreferredRanges())).
+WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
 Info("Blocked addresses for handshakes")

 // Swap the packet store to benefit the original intended recipient
@@ -482,7 +444,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
 ci.eKey = NewNebulaCipherState(eKey)

 // Make sure the current udpAddr being used is set for responding
-if addr.IsValid() {
+if addr != nil {
 hostinfo.SetRemote(addr)
 } else {
 hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
@@ -493,7 +455,9 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha

 // Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
 f.handshakeManager.Complete(hostinfo, f)
-f.connectionManager.AddTrafficWatch(hostinfo)
+f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)

+hostinfo.ConnectionState.messageCounter.Store(2)

 if f.l.Level >= logrus.DebugLevel {
 hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
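A recurring change through this file is the guard for "do we have a real underlay address, or did this packet arrive through a relay": the netip side uses the value type's IsValid() zero-value check, the older side a nil check on *udp.Addr. A small standalone sketch of the value-type pattern; handleHandshake is an illustrative name, not a function from the repo:

    package main

    import (
    	"fmt"
    	"net/netip"
    )

    // handleHandshake mimics the addr.IsValid() guard used in the handshake path:
    // a zero netip.AddrPort stands in for "came in via a relay, no direct remote".
    func handleHandshake(addr netip.AddrPort) {
    	if addr.IsValid() {
    		fmt.Println("reply directly to", addr)
    		return
    	}
    	fmt.Println("no underlay address, reply through the relay")
    }

    func main() {
    	handleHandshake(netip.MustParseAddrPort("192.0.2.10:4242"))
    	handleHandshake(netip.AddrPort{}) // zero value, as a relayed handshake would pass
    }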
@@ -6,15 +6,15 @@ import (
 "crypto/rand"
 "encoding/binary"
 "errors"
-"net/netip"
+"net"
 "sync"
 "time"

 "github.com/rcrowley/go-metrics"
 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
 "github.com/slackhq/nebula/udp"
-"golang.org/x/exp/slices"
 )

 const (
@@ -35,7 +35,7 @@ var (

 type HandshakeConfig struct {
 tryInterval time.Duration
-retries int64
+retries int
 triggerBuffer int
 useRelays bool

@@ -46,14 +46,14 @@ type HandshakeManager struct {
 // Mutex for interacting with the vpnIps and indexes maps
 sync.RWMutex

-vpnIps map[netip.Addr]*HandshakeHostInfo
+vpnIps map[iputil.VpnIp]*HandshakeHostInfo
 indexes map[uint32]*HandshakeHostInfo

 mainHostMap *HostMap
 lightHouse *LightHouse
 outside udp.Conn
 config HandshakeConfig
-OutboundHandshakeTimer *LockingTimerWheel[netip.Addr]
+OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
 messageMetrics *MessageMetrics
 metricInitiated metrics.Counter
 metricTimedOut metrics.Counter
@@ -61,17 +61,17 @@ type HandshakeManager struct {
 l *logrus.Logger

 // can be used to trigger outbound handshake for the given vpnIp
-trigger chan netip.Addr
+trigger chan iputil.VpnIp
 }

 type HandshakeHostInfo struct {
 sync.Mutex

 startTime time.Time // Time that we first started trying with this handshake
 ready bool // Is the handshake ready
-counter int64 // How many attempts have we made so far
+counter int // How many attempts have we made so far
-lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
+lastRemotes []*udp.Addr // Remotes that we sent to during the previous attempt
 packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes

 hostinfo *HostInfo
 }
@@ -103,14 +103,14 @@ func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType,

 func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
 return &HandshakeManager{
-vpnIps: map[netip.Addr]*HandshakeHostInfo{},
+vpnIps: map[iputil.VpnIp]*HandshakeHostInfo{},
 indexes: map[uint32]*HandshakeHostInfo{},
 mainHostMap: mainHostMap,
 lightHouse: lightHouse,
 outside: outside,
 config: config,
-trigger: make(chan netip.Addr, config.triggerBuffer),
+trigger: make(chan iputil.VpnIp, config.triggerBuffer),
-OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
+OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
 messageMetrics: config.messageMetrics,
 metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
 metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
@@ -134,10 +134,10 @@ func (c *HandshakeManager) Run(ctx context.Context) {
 }
 }

-func (hm *HandshakeManager) HandleIncoming(addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
+func (hm *HandshakeManager) HandleIncoming(addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
 // First remote allow list check before we know the vpnIp
-if addr.IsValid() {
+if addr != nil {
-if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.Addr()) {
+if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
 hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
 return
 }
@@ -170,7 +170,7 @@ func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
 }
 }

-func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered bool) {
+func (hm *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, lighthouseTriggered bool) {
 hh := hm.queryVpnIp(vpnIp)
 if hh == nil {
 return
@@ -181,7 +181,7 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 hostinfo := hh.hostinfo
 // If we are out of time, clean up
 if hh.counter >= hm.config.retries {
-hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())).
+hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.preferredRanges)).
 WithField("initiatorIndex", hh.hostinfo.localIndexId).
 WithField("remoteIndex", hh.hostinfo.remoteIndexId).
 WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
@@ -211,8 +211,8 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
 }

-remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())
+remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.preferredRanges)
-remotesHaveChanged := !slices.Equal(remotes, hh.lastRemotes)
+remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hh.lastRemotes)

 // We only care about a lighthouse trigger if we have new remotes to send to.
 // This is a very specific optimization for a fast lighthouse reply.
@@ -234,8 +234,8 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 }

 // Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
-var sentTo []netip.AddrPort
+var sentTo []*udp.Addr
-hostinfo.remotes.ForEach(hm.mainHostMap.GetPreferredRanges(), func(addr netip.AddrPort, _ bool) {
+hostinfo.remotes.ForEach(hm.mainHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
 hm.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
 err := hm.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
 if err != nil {
@@ -268,34 +268,65 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 // Send a RelayRequest to all known Relay IP's
 for _, relay := range hostinfo.remotes.relays {
 // Don't relay to myself, and don't relay through the host I'm trying to connect to
-if relay == vpnIp || relay == hm.lightHouse.myVpnNet.Addr() {
+if *relay == vpnIp || *relay == hm.lightHouse.myVpnIp {
 continue
 }
-relayHostInfo := hm.mainHostMap.QueryVpnIp(relay)
+relayHostInfo := hm.mainHostMap.QueryVpnIp(*relay)
-if relayHostInfo == nil || !relayHostInfo.remote.IsValid() {
+if relayHostInfo == nil || relayHostInfo.remote == nil {
 hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
-hm.f.Handshake(relay)
+hm.f.Handshake(*relay)
 continue
 }
 // Check the relay HostInfo to see if we already established a relay through it
-existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp)
+if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
-if !ok {
+switch existingRelay.State {
+case Established:
+hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
+hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
+case Requested:
+hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
+// Re-send the CreateRelay request, in case the previous one was lost.
+m := NebulaControl{
+Type: NebulaControl_CreateRelayRequest,
+InitiatorRelayIndex: existingRelay.LocalIndex,
+RelayFromIp: uint32(hm.lightHouse.myVpnIp),
+RelayToIp: uint32(vpnIp),
+}
+msg, err := m.Marshal()
+if err != nil {
+hostinfo.logger(hm.l).
+WithError(err).
+Error("Failed to marshal Control message to create relay")
+} else {
+// This must send over the hostinfo, not over hm.Hosts[ip]
+hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+hm.l.WithFields(logrus.Fields{
+"relayFrom": hm.lightHouse.myVpnIp,
+"relayTo": vpnIp,
+"initiatorRelayIndex": existingRelay.LocalIndex,
+"relay": *relay}).
+Info("send CreateRelayRequest")
+}
+default:
+hostinfo.logger(hm.l).
+WithField("vpnIp", vpnIp).
+WithField("state", existingRelay.State).
+WithField("relay", relayHostInfo.vpnIp).
+Errorf("Relay unexpected state")
+}
+} else {
 // No relays exist or requested yet.
-if relayHostInfo.remote.IsValid() {
+if relayHostInfo.remote != nil {
 idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
 if err != nil {
 hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
 }

-//TODO: IPV6-WORK
-myVpnIpB := hm.f.myVpnNet.Addr().As4()
-theirVpnIpB := vpnIp.As4()

 m := NebulaControl{
 Type: NebulaControl_CreateRelayRequest,
 InitiatorRelayIndex: idx,
-RelayFromIp: binary.BigEndian.Uint32(myVpnIpB[:]),
+RelayFromIp: uint32(hm.lightHouse.myVpnIp),
-RelayToIp: binary.BigEndian.Uint32(theirVpnIpB[:]),
+RelayToIp: uint32(vpnIp),
 }
 msg, err := m.Marshal()
 if err != nil {
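Both sides of the CreateRelayRequest hunks above end up putting a 32-bit IPv4 value into the NebulaControl RelayFromIp/RelayToIp fields; they differ only in how they get there (As4 plus binary.BigEndian versus a plain uint32 conversion of iputil.VpnIp). A hedged sketch of the netip route, with relayRequest as a stand-in for the generated protobuf message:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"net/netip"
    )

    // relayRequest is a stand-in for the generated NebulaControl message.
    type relayRequest struct {
    	RelayFromIp uint32
    	RelayToIp   uint32
    }

    func main() {
    	me := netip.MustParseAddr("10.1.0.1")
    	peer := netip.MustParseAddr("10.1.0.2")

    	// As4 returns the address as a fixed [4]byte array; reading it
    	// big-endian gives the same uint32 the legacy VpnIp type carries.
    	meB, peerB := me.As4(), peer.As4()
    	req := relayRequest{
    		RelayFromIp: binary.BigEndian.Uint32(meB[:]),
    		RelayToIp:   binary.BigEndian.Uint32(peerB[:]),
    	}
    	fmt.Printf("%#v\n", req)
    }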
@@ -305,59 +336,13 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 } else {
 hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
 hm.l.WithFields(logrus.Fields{
-"relayFrom": hm.f.myVpnNet.Addr(),
+"relayFrom": hm.lightHouse.myVpnIp,
 "relayTo": vpnIp,
 "initiatorRelayIndex": idx,
-"relay": relay}).
+"relay": *relay}).
 Info("send CreateRelayRequest")
 }
 }
-continue
-}
-switch existingRelay.State {
-case Established:
-hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
-hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
-case Disestablished:
-// Mark this relay as 'requested'
-relayHostInfo.relayState.UpdateRelayForByIpState(vpnIp, Requested)
-fallthrough
-case Requested:
-hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
-// Re-send the CreateRelay request, in case the previous one was lost.
-relayFrom := hm.f.myVpnNet.Addr().As4()
-relayTo := vpnIp.As4()
-m := NebulaControl{
-Type: NebulaControl_CreateRelayRequest,
-InitiatorRelayIndex: existingRelay.LocalIndex,
-RelayFromIp: binary.BigEndian.Uint32(relayFrom[:]),
-RelayToIp: binary.BigEndian.Uint32(relayTo[:]),
-}

-msg, err := m.Marshal()
-if err != nil {
-hostinfo.logger(hm.l).
-WithError(err).
-Error("Failed to marshal Control message to create relay")
-} else {
-// This must send over the hostinfo, not over hm.Hosts[ip]
-hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
-hm.l.WithFields(logrus.Fields{
-"relayFrom": hm.f.myVpnNet,
-"relayTo": vpnIp,
-"initiatorRelayIndex": existingRelay.LocalIndex,
-"relay": relay}).
-Info("send CreateRelayRequest")
-}
-case PeerRequested:
-// PeerRequested only occurs in Forwarding relays, not Terminal relays, and this is a Terminal relay case.
-fallthrough
-default:
-hostinfo.logger(hm.l).
-WithField("vpnIp", vpnIp).
-WithField("state", existingRelay.State).
-WithField("relay", relay).
-Errorf("Relay unexpected state")
-}
 }
 }
 }
@@ -370,32 +355,32 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered

 // GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
 // The 2nd argument will be true if the hostinfo is ready to transmit traffic
-func (hm *HandshakeManager) GetOrHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
+func (hm *HandshakeManager) GetOrHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
+// Check the main hostmap and maintain a read lock if our host is not there
 hm.mainHostMap.RLock()
-h, ok := hm.mainHostMap.Hosts[vpnIp]
+if h, ok := hm.mainHostMap.Hosts[vpnIp]; ok {
 hm.mainHostMap.RUnlock()

-if ok {
 // Do not attempt promotion if you are a lighthouse
 if !hm.lightHouse.amLighthouse {
-h.TryPromoteBest(hm.mainHostMap.GetPreferredRanges(), hm.f)
+h.TryPromoteBest(hm.mainHostMap.preferredRanges, hm.f)
 }
 return h, true
 }

+defer hm.mainHostMap.RUnlock()
 return hm.StartHandshake(vpnIp, cacheCb), false
 }

 // StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
-func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) *HostInfo {
+func (hm *HandshakeManager) StartHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) *HostInfo {
 hm.Lock()
+defer hm.Unlock()

 if hh, ok := hm.vpnIps[vpnIp]; ok {
 // We are already trying to handshake with this vpn ip
 if cacheCb != nil {
 cacheCb(hh)
 }
-hm.Unlock()
 return hh.hostinfo
 }

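The GetOrHandshake change above is mostly about lock discipline: one side drops the main hostmap read lock as soon as the lookup finishes, the other keeps holding it (its comment: maintain a read lock if our host is not there) while StartHandshake registers the pending handshake under the manager's own mutex, presumably so the miss and the registration cannot interleave with a concurrent promotion into the main map. A toy sketch of that two-map, two-lock shape with hypothetical names; the real code also wires in lighthouse queries and timers:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type tracker struct {
    	mainMu  sync.RWMutex
    	main    map[string]string // established tunnels (stand-in for HostMap.Hosts)
    	pendMu  sync.Mutex
    	pending map[string]string // in-flight handshakes (stand-in for HandshakeManager.vpnIps)
    }

    // getOrStart returns an established entry, or registers a pending one while
    // still holding the read lock on the established map.
    func (t *tracker) getOrStart(ip string) (string, bool) {
    	t.mainMu.RLock()
    	if h, ok := t.main[ip]; ok {
    		t.mainMu.RUnlock()
    		return h, true
    	}
    	defer t.mainMu.RUnlock()

    	t.pendMu.Lock()
    	defer t.pendMu.Unlock()
    	if h, ok := t.pending[ip]; ok {
    		return h, false
    	}
    	t.pending[ip] = "handshake to " + ip
    	return t.pending[ip], false
    }

    func main() {
    	t := &tracker{main: map[string]string{}, pending: map[string]string{}}
    	fmt.Println(t.getOrStart("10.1.0.2"))
    }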
@@ -403,8 +388,8 @@ func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*Hands
 vpnIp: vpnIp,
 HandshakePacket: make(map[uint8][]byte, 0),
 relayState: RelayState{
-relays: nil,
+relays: map[iputil.VpnIp]struct{}{},
-relayForByIp: map[netip.Addr]*Relay{},
+relayForByIp: map[iputil.VpnIp]*Relay{},
 relayForByIdx: map[uint32]*Relay{},
 },
 }
@@ -436,7 +421,6 @@ func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*Hands
 }
 }

-hm.Unlock()
 hm.lightHouse.QueryServer(vpnIp)
 return hostinfo
 }
@@ -494,7 +478,7 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
 if found && existingPendingIndex.hostinfo != hostinfo {
 // We have a collision, but for a different hostinfo
-return existingPendingIndex.hostinfo, ErrLocalIndexCollision
+return existingIndex, ErrLocalIndexCollision
 }

 existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
@@ -570,7 +554,7 @@ func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
 func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
 delete(c.vpnIps, hostinfo.vpnIp)
 if len(c.vpnIps) == 0 {
-c.vpnIps = map[netip.Addr]*HandshakeHostInfo{}
+c.vpnIps = map[iputil.VpnIp]*HandshakeHostInfo{}
 }

 delete(c.indexes, hostinfo.localIndexId)
@@ -585,7 +569,7 @@ func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
 }
 }

-func (hm *HandshakeManager) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
+func (hm *HandshakeManager) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
 hh := hm.queryVpnIp(vpnIp)
 if hh != nil {
 return hh.hostinfo
@@ -594,7 +578,7 @@ func (hm *HandshakeManager) QueryVpnIp(vpnIp netip.Addr) *HostInfo {

 }

-func (hm *HandshakeManager) queryVpnIp(vpnIp netip.Addr) *HandshakeHostInfo {
+func (hm *HandshakeManager) queryVpnIp(vpnIp iputil.VpnIp) *HandshakeHostInfo {
 hm.RLock()
 defer hm.RUnlock()
 return hm.vpnIps[vpnIp]
@@ -614,8 +598,8 @@ func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
 return hm.indexes[index]
 }

-func (c *HandshakeManager) GetPreferredRanges() []netip.Prefix {
+func (c *HandshakeManager) GetPreferredRanges() []*net.IPNet {
-return c.mainHostMap.GetPreferredRanges()
+return c.mainHostMap.preferredRanges
 }

 func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
@@ -671,6 +655,6 @@ func generateIndex(l *logrus.Logger) (uint32, error) {
 return index, nil
 }

-func hsTimeout(tries int64, interval time.Duration) time.Duration {
+func hsTimeout(tries int, interval time.Duration) time.Duration {
-return time.Duration(tries / 2 * ((2 * int64(interval)) + (tries-1)*int64(interval)))
+return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
 }
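hsTimeout above only changes the integer width, not the arithmetic: tries/2 * ((2*interval) + (tries-1)*interval) is the closed form of interval*1 + interval*2 + ... + interval*tries, i.e. the total span the outbound handshake timer wheel covers when each retry waits one more interval than the last (the integer division makes it exact only for an even retry count). A quick check of that identity, using example values rather than the real defaults:

    package main

    import (
    	"fmt"
    	"time"
    )

    // hsTimeout mirrors the int64 variant of the formula in the hunk above.
    func hsTimeout(tries int64, interval time.Duration) time.Duration {
    	return time.Duration(tries / 2 * ((2 * int64(interval)) + (tries-1)*int64(interval)))
    }

    func main() {
    	// Example values only; the real retry count and interval come from config.
    	tries, interval := int64(10), 100*time.Millisecond

    	// The closed form equals the explicit arithmetic series.
    	var sum time.Duration
    	for k := int64(1); k <= tries; k++ {
    		sum += time.Duration(k) * interval
    	}
    	fmt.Println(hsTimeout(tries, interval), sum) // both print 5.5s
    }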
@@ -1,12 +1,13 @@
 package nebula

 import (
-"net/netip"
+"net"
 "testing"
 "time"

 "github.com/slackhq/nebula/cert"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
 "github.com/slackhq/nebula/test"
 "github.com/slackhq/nebula/udp"
 "github.com/stretchr/testify/assert"
@@ -14,14 +15,11 @@ import (

 func Test_NewHandshakeManagerVpnIp(t *testing.T) {
 l := test.NewLogger()
-vpncidr := netip.MustParsePrefix("172.1.1.1/24")
+_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-localrange := netip.MustParsePrefix("10.1.1.1/24")
+_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-ip := netip.MustParseAddr("172.1.1.2")
+ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
+preferredRanges := []*net.IPNet{localrange}
-preferredRanges := []netip.Prefix{localrange}
+mainHM := NewHostMap(l, vpncidr, preferredRanges)
-mainHM := newHostMap(l, vpncidr)
-mainHM.preferredRanges.Store(&preferredRanges)

 lh := newTestLighthouse()

 cs := &CertState{
@@ -66,7 +64,7 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
 assert.NotContains(t, blah.vpnIps, ip)
 }

-func testCountTimerWheelEntries(tw *LockingTimerWheel[netip.Addr]) (c int) {
+func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
 for _, i := range tw.t.wheel {
 n := i.Head
 for n != nil {
@@ -80,7 +78,7 @@ func testCountTimerWheelEntries(tw *LockingTimerWheel[netip.Addr]) (c int) {
 type mockEncWriter struct {
 }

-func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte) {
+func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
 return
 }

@@ -92,4 +90,4 @@ func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.M
 return
 }

-func (mw *mockEncWriter) Handshake(vpnIP netip.Addr) {}
+func (mw *mockEncWriter) Handshake(vpnIP iputil.VpnIp) {}
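The test setup above is the same fixture expressed against two address APIs: netip.MustParsePrefix/MustParseAddr on one side, net.ParseCIDR/net.ParseIP plus iputil on the other. A small side-by-side of the two parsing styles, reusing the test's own values in a standalone snippet:

    package main

    import (
    	"fmt"
    	"net"
    	"net/netip"
    )

    func main() {
    	// netip style: value types, Must* panics on bad input.
    	vpncidr := netip.MustParsePrefix("172.1.1.1/24")
    	ip := netip.MustParseAddr("172.1.1.2")

    	// net style: pointer types, errors ignored here exactly as in the test.
    	_, vpncidrOld, _ := net.ParseCIDR("172.1.1.1/24")
    	ipOld := net.ParseIP("172.1.1.2")

    	fmt.Println(vpncidr, ip, vpncidrOld, ipOld)
    }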
288 hostmap.go
@@ -3,18 +3,17 @@ package nebula
 import (
 "errors"
 "net"
-"net/netip"
-"slices"
 "sync"
 "sync/atomic"
 "time"

-"github.com/gaissmai/bart"
 "github.com/rcrowley/go-metrics"
 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/cert"
-"github.com/slackhq/nebula/config"
+"github.com/slackhq/nebula/cidr"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
+"github.com/slackhq/nebula/udp"
 )

 // const ProbeLen = 100
@@ -22,6 +21,7 @@ const defaultPromoteEvery = 1000 // Count of packets sent before we try mo
 const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
 const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
 const MaxRemotes = 10
+const maxRecvError = 4

 // MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
 // 5 allows for an initial handshake and each host pair re-handshaking twice
@@ -35,7 +35,6 @@ const (
 Requested = iota
 PeerRequested
 Established
-Disestablished
 )

 const (
@@ -49,7 +48,7 @@ type Relay struct {
 State int
 LocalIndex uint32
 RemoteIndex uint32
-PeerIp netip.Addr
+PeerIp iputil.VpnIp
 }

 type HostMap struct {
@@ -57,9 +56,10 @@ type HostMap struct {
 Indexes map[uint32]*HostInfo
 Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
 RemoteIndexes map[uint32]*HostInfo
-Hosts map[netip.Addr]*HostInfo
+Hosts map[iputil.VpnIp]*HostInfo
-preferredRanges atomic.Pointer[[]netip.Prefix]
+preferredRanges []*net.IPNet
-vpnCIDR netip.Prefix
+vpnCIDR *net.IPNet
+metricsEnabled bool
 l *logrus.Logger
 }

@@ -69,42 +69,15 @@ type HostMap struct {
 type RelayState struct {
 sync.RWMutex

-relays []netip.Addr // Ordered set of VpnIp's of Hosts to use as relays to access this peer
+relays map[iputil.VpnIp]struct{} // Set of VpnIp's of Hosts to use as relays to access this peer
-relayForByIp map[netip.Addr]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
+relayForByIp map[iputil.VpnIp]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
 relayForByIdx map[uint32]*Relay // Maps a local index to some Relay info
 }

-func (rs *RelayState) DeleteRelay(ip netip.Addr) {
+func (rs *RelayState) DeleteRelay(ip iputil.VpnIp) {
 rs.Lock()
 defer rs.Unlock()
-for idx, val := range rs.relays {
+delete(rs.relays, ip)
-if val == ip {
-rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
-return
-}
-}
-}

-func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
-rs.Lock()
-defer rs.Unlock()
-if r, ok := rs.relayForByIp[vpnIp]; ok {
-newRelay := *r
-newRelay.State = state
-rs.relayForByIp[newRelay.PeerIp] = &newRelay
-rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
-}
-}

-func (rs *RelayState) UpdateRelayForByIdxState(idx uint32, state int) {
-rs.Lock()
-defer rs.Unlock()
-if r, ok := rs.relayForByIdx[idx]; ok {
-newRelay := *r
-newRelay.State = state
-rs.relayForByIp[newRelay.PeerIp] = &newRelay
-rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
-}
 }

 func (rs *RelayState) CopyAllRelayFor() []*Relay {
@@ -117,33 +90,33 @@ func (rs *RelayState) CopyAllRelayFor() []*Relay {
 return ret
 }

-func (rs *RelayState) GetRelayForByIp(ip netip.Addr) (*Relay, bool) {
+func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) {
 rs.RLock()
 defer rs.RUnlock()
 r, ok := rs.relayForByIp[ip]
 return r, ok
 }

-func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
+func (rs *RelayState) InsertRelayTo(ip iputil.VpnIp) {
 rs.Lock()
 defer rs.Unlock()
-if !slices.Contains(rs.relays, ip) {
+rs.relays[ip] = struct{}{}
-rs.relays = append(rs.relays, ip)
-}
 }

-func (rs *RelayState) CopyRelayIps() []netip.Addr {
+func (rs *RelayState) CopyRelayIps() []iputil.VpnIp {
-ret := make([]netip.Addr, len(rs.relays))
 rs.RLock()
 defer rs.RUnlock()
-copy(ret, rs.relays)
+ret := make([]iputil.VpnIp, 0, len(rs.relays))
+for ip := range rs.relays {
+ret = append(ret, ip)
+}
 return ret
 }

-func (rs *RelayState) CopyRelayForIps() []netip.Addr {
+func (rs *RelayState) CopyRelayForIps() []iputil.VpnIp {
 rs.RLock()
 defer rs.RUnlock()
-currentRelays := make([]netip.Addr, 0, len(rs.relayForByIp))
+currentRelays := make([]iputil.VpnIp, 0, len(rs.relayForByIp))
 for relayIp := range rs.relayForByIp {
 currentRelays = append(currentRelays, relayIp)
 }
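The RelayState hunks above trade a map-backed set of relay addresses for an ordered slice with a contains-check on insert, which keeps iteration order stable while still preventing duplicates. A compact, hypothetical sketch of that ordered-set idiom using the standard library slices package; relaySet is not a type from the repo:

    package main

    import (
    	"fmt"
    	"net/netip"
    	"slices"
    )

    // relaySet is an illustrative stand-in: an ordered slice used as a set.
    type relaySet struct {
    	relays []netip.Addr // ordered, no duplicates
    }

    func (s *relaySet) insert(ip netip.Addr) {
    	if !slices.Contains(s.relays, ip) {
    		s.relays = append(s.relays, ip)
    	}
    }

    func (s *relaySet) remove(ip netip.Addr) {
    	for i, v := range s.relays {
    		if v == ip {
    			s.relays = append(s.relays[:i], s.relays[i+1:]...)
    			return
    		}
    	}
    }

    func main() {
    	var s relaySet
    	s.insert(netip.MustParseAddr("10.1.0.1"))
    	s.insert(netip.MustParseAddr("10.1.0.2"))
    	s.insert(netip.MustParseAddr("10.1.0.1")) // duplicate, ignored
    	s.remove(netip.MustParseAddr("10.1.0.1"))
    	fmt.Println(s.relays) // [10.1.0.2]
    }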
@@ -160,7 +133,19 @@ func (rs *RelayState) CopyRelayForIdxs() []uint32 {
 return ret
 }

+func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) {
+rs.Lock()
+defer rs.Unlock()
+r, ok := rs.relayForByIdx[localIdx]
+if !ok {
+return iputil.VpnIp(0), false
+}
+delete(rs.relayForByIdx, localIdx)
+delete(rs.relayForByIp, r.PeerIp)
+return r.PeerIp, true
+}
+
-func (rs *RelayState) CompleteRelayByIP(vpnIp netip.Addr, remoteIdx uint32) bool {
+func (rs *RelayState) CompleteRelayByIP(vpnIp iputil.VpnIp, remoteIdx uint32) bool {
 rs.Lock()
 defer rs.Unlock()
 r, ok := rs.relayForByIp[vpnIp]
@@ -190,7 +175,7 @@ func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Re
 return &newRelay, true
 }

-func (rs *RelayState) QueryRelayForByIp(vpnIp netip.Addr) (*Relay, bool) {
+func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) {
 rs.RLock()
 defer rs.RUnlock()
 r, ok := rs.relayForByIp[vpnIp]
@@ -204,7 +189,7 @@ func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
 return r, ok
 }

-func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
+func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
 rs.Lock()
 defer rs.Unlock()
 rs.relayForByIp[ip] = r
@@ -212,14 +197,15 @@ func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
 }

 type HostInfo struct {
-remote netip.AddrPort
+remote *udp.Addr
 remotes *RemoteList
 promoteCounter atomic.Uint32
 ConnectionState *ConnectionState
 remoteIndexId uint32
 localIndexId uint32
-vpnIp netip.Addr
+vpnIp iputil.VpnIp
-remoteCidr *bart.Table[struct{}]
+recvError atomic.Uint32
+remoteCidr *cidr.Tree4[struct{}]
 relayState RelayState

 // HandshakePacket records the packets used to create this hostinfo
@@ -241,19 +227,11 @@ type HostInfo struct {
 lastHandshakeTime uint64

 lastRoam time.Time
-lastRoamRemote netip.AddrPort
+lastRoamRemote *udp.Addr

 // Used to track other hostinfos for this vpn ip since only 1 can be primary
 // Synchronised via hostmap lock and not the hostinfo lock.
 next, prev *HostInfo

-//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
-in, out, pendingDeletion atomic.Bool

-// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
-// This value will be behind against actual tunnel utilization in the hot path.
-// This should only be used by the ConnectionManagers ticker routine.
-lastUsed time.Time
 }

 type ViaSender struct {
@@ -276,53 +254,21 @@ type cachedPacketMetrics struct {
 dropped metrics.Counter
 }

-func NewHostMapFromConfig(l *logrus.Logger, vpnCIDR netip.Prefix, c *config.C) *HostMap {
-hm := newHostMap(l, vpnCIDR)
-hm.reload(c, true)
-c.RegisterReloadCallback(func(c *config.C) {
-hm.reload(c, false)
-})
-l.WithField("network", hm.vpnCIDR.String()).
-WithField("preferredRanges", hm.GetPreferredRanges()).
-Info("Main HostMap created")
-return hm
-}
-
-func newHostMap(l *logrus.Logger, vpnCIDR netip.Prefix) *HostMap {
-return &HostMap{
-Indexes: map[uint32]*HostInfo{},
-Relays: map[uint32]*HostInfo{},
-RemoteIndexes: map[uint32]*HostInfo{},
-Hosts: map[netip.Addr]*HostInfo{},
-vpnCIDR: vpnCIDR,
-l: l,
-}
-}
-
-func (hm *HostMap) reload(c *config.C, initial bool) {
-if initial || c.HasChanged("preferred_ranges") {
-var preferredRanges []netip.Prefix
-rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
-for _, rawPreferredRange := range rawPreferredRanges {
-preferredRange, err := netip.ParsePrefix(rawPreferredRange)
-if err != nil {
-hm.l.WithError(err).WithField("range", rawPreferredRanges).Warn("Failed to parse preferred ranges, ignoring")
-continue
-}
-preferredRanges = append(preferredRanges, preferredRange)
-}
-oldRanges := hm.preferredRanges.Swap(&preferredRanges)
-if !initial {
-hm.l.WithField("oldPreferredRanges", *oldRanges).WithField("newPreferredRanges", preferredRanges).Info("preferred_ranges changed")
-}
-}
-}
+func NewHostMap(l *logrus.Logger, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
+h := map[iputil.VpnIp]*HostInfo{}
+i := map[uint32]*HostInfo{}
+r := map[uint32]*HostInfo{}
+relays := map[uint32]*HostInfo{}
+m := HostMap{
+Indexes: i,
+Relays: relays,
+RemoteIndexes: r,
+Hosts: h,
+preferredRanges: preferredRanges,
+vpnCIDR: vpnCIDR,
+l: l,
+}
+return &m
+}
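One side of the hunk above builds the HostMap directly from its arguments; the other side wires in a config reload and guards preferredRanges behind an atomic.Pointer so a reload can swap the whole slice without readers taking the hostmap lock. A reduced sketch of that publish-by-swap pattern, with a hypothetical reload function standing in for the config callback:

    package main

    import (
    	"fmt"
    	"net/netip"
    	"sync/atomic"
    )

    // hostMap is an illustrative stand-in; only the preferredRanges field is modeled.
    type hostMap struct {
    	preferredRanges atomic.Pointer[[]netip.Prefix]
    }

    // reload parses the raw strings and publishes the new slice in one atomic swap.
    func (hm *hostMap) reload(raw []string) {
    	var ranges []netip.Prefix
    	for _, r := range raw {
    		p, err := netip.ParsePrefix(r)
    		if err != nil {
    			continue // the real code logs and ignores bad entries
    		}
    		ranges = append(ranges, p)
    	}
    	hm.preferredRanges.Swap(&ranges)
    }

    func (hm *hostMap) getPreferredRanges() []netip.Prefix {
    	return *hm.preferredRanges.Load()
    }

    func main() {
    	hm := &hostMap{}
    	hm.reload(nil) // store something before the first Load, as the NOTE in the hunk below warns
    	hm.reload([]string{"10.1.1.0/24", "not-a-cidr"})
    	fmt.Println(hm.getPreferredRanges())
    }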
// EmitStats reports host, index, and relay counts to the stats collection system
|
// EmitStats reports host, index, and relay counts to the stats collection system
|
||||||
@@ -396,12 +342,11 @@ func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
|
|||||||
|
|
||||||
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
||||||
primary, ok := hm.Hosts[hostinfo.vpnIp]
|
primary, ok := hm.Hosts[hostinfo.vpnIp]
|
||||||
isLastHostinfo := hostinfo.next == nil && hostinfo.prev == nil
|
|
||||||
if ok && primary == hostinfo {
|
if ok && primary == hostinfo {
|
||||||
// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
|
// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
|
||||||
delete(hm.Hosts, hostinfo.vpnIp)
|
delete(hm.Hosts, hostinfo.vpnIp)
|
||||||
if len(hm.Hosts) == 0 {
|
if len(hm.Hosts) == 0 {
|
||||||
hm.Hosts = map[netip.Addr]*HostInfo{}
|
hm.Hosts = map[iputil.VpnIp]*HostInfo{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if hostinfo.next != nil {
|
if hostinfo.next != nil {
|
||||||
@@ -446,12 +391,6 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
|||||||
Debug("Hostmap hostInfo deleted")
|
Debug("Hostmap hostInfo deleted")
|
||||||
}
|
}
|
||||||
|
|
||||||
if isLastHostinfo {
|
|
||||||
// I have lost connectivity to my peers. My relay tunnel is likely broken. Mark the next
|
|
||||||
// hops as 'Disestablished' so that new relay tunnels are created in the future.
|
|
||||||
hm.unlockedDisestablishVpnAddrRelayFor(hostinfo)
|
|
||||||
}
|
|
||||||
// Clean up any local relay indexes for which I am acting as a relay hop
|
|
||||||
for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
|
for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
|
||||||
delete(hm.Relays, localRelayIdx)
|
delete(hm.Relays, localRelayIdx)
|
||||||
}
|
}
|
||||||
@@ -490,11 +429,11 @@ func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
|
func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
|
||||||
return hm.queryVpnIp(vpnIp, nil)
|
return hm.queryVpnIp(vpnIp, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostInfo, *Relay, error) {
|
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*HostInfo, *Relay, error) {
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
defer hm.RUnlock()
|
defer hm.RUnlock()
|
||||||
|
|
||||||
@@ -512,34 +451,13 @@ func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostIn
|
|||||||
return nil, nil, errors.New("unable to find host with relay")
|
return nil, nil, errors.New("unable to find host with relay")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) unlockedDisestablishVpnAddrRelayFor(hi *HostInfo) {
|
func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) *HostInfo {
|
||||||
for _, relayHostIp := range hi.relayState.CopyRelayIps() {
|
|
||||||
if h, ok := hm.Hosts[relayHostIp]; ok {
|
|
||||||
for h != nil {
|
|
||||||
h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
|
|
||||||
h = h.next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, rs := range hi.relayState.CopyAllRelayFor() {
|
|
||||||
if rs.Type == ForwardingType {
|
|
||||||
if h, ok := hm.Hosts[rs.PeerIp]; ok {
|
|
||||||
for h != nil {
|
|
||||||
h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
|
|
||||||
h = h.next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hm *HostMap) queryVpnIp(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
|
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
if h, ok := hm.Hosts[vpnIp]; ok {
|
if h, ok := hm.Hosts[vpnIp]; ok {
|
||||||
hm.RUnlock()
|
hm.RUnlock()
|
||||||
// Do not attempt promotion if you are a lighthouse
|
// Do not attempt promotion if you are a lighthouse
|
||||||
if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
|
if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
|
||||||
h.TryPromoteBest(hm.GetPreferredRanges(), promoteIfce)
|
h.TryPromoteBest(hm.preferredRanges, promoteIfce)
|
||||||
}
|
}
|
||||||
return h
|
return h
|
||||||
|
|
||||||
@@ -585,9 +503,8 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) GetPreferredRanges() []netip.Prefix {
|
func (hm *HostMap) GetPreferredRanges() []*net.IPNet {
|
||||||
//NOTE: if preferredRanges is ever not stored before a load this will fail to dereference a nil pointer
|
return hm.preferredRanges
|
||||||
return *hm.preferredRanges.Load()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) ForEachVpnIp(f controlEach) {
|
func (hm *HostMap) ForEachVpnIp(f controlEach) {
|
||||||
@@ -610,14 +527,14 @@ func (hm *HostMap) ForEachIndex(f controlEach) {
|
|||||||
|
|
||||||
// TryPromoteBest handles re-querying lighthouses and probing for better paths
|
// TryPromoteBest handles re-querying lighthouses and probing for better paths
|
||||||
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
|
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
|
||||||
func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interface) {
|
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
|
||||||
c := i.promoteCounter.Add(1)
|
c := i.promoteCounter.Add(1)
|
||||||
if c%ifce.tryPromoteEvery.Load() == 0 {
|
if c%ifce.tryPromoteEvery.Load() == 0 {
|
||||||
remote := i.remote
|
remote := i.remote
|
||||||
|
|
||||||
// return early if we are already on a preferred remote
|
// return early if we are already on a preferred remote
|
||||||
if remote.IsValid() {
|
if remote != nil {
|
||||||
rIP := remote.Addr()
|
rIP := remote.IP
|
||||||
for _, l := range preferredRanges {
|
for _, l := range preferredRanges {
|
||||||
if l.Contains(rIP) {
|
if l.Contains(rIP) {
|
||||||
return
|
return
|
||||||
@@ -625,8 +542,8 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interfac
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
i.remotes.ForEach(preferredRanges, func(addr netip.AddrPort, preferred bool) {
|
i.remotes.ForEach(preferredRanges, func(addr *udp.Addr, preferred bool) {
|
||||||
if remote.IsValid() && (!addr.IsValid() || !preferred) {
|
if remote != nil && (addr == nil || !preferred) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -655,23 +572,23 @@ func (i *HostInfo) GetCert() *cert.NebulaCertificate {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *HostInfo) SetRemote(remote netip.AddrPort) {
|
func (i *HostInfo) SetRemote(remote *udp.Addr) {
|
||||||
// We copy here because we likely got this remote from a source that reuses the object
|
// We copy here because we likely got this remote from a source that reuses the object
|
||||||
if i.remote != remote {
|
if !i.remote.Equals(remote) {
|
||||||
i.remote = remote
|
i.remote = remote.Copy()
|
||||||
i.remotes.LearnRemote(i.vpnIp, remote)
|
i.remotes.LearnRemote(i.vpnIp, remote.Copy())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetRemoteIfPreferred returns true if the remote was changed. The lastRoam
|
// SetRemoteIfPreferred returns true if the remote was changed. The lastRoam
|
||||||
// time on the HostInfo will also be updated.
|
// time on the HostInfo will also be updated.
|
||||||
func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) bool {
|
func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
|
||||||
if !newRemote.IsValid() {
|
if newRemote == nil {
|
||||||
// relays have nil udp Addrs
|
// relays have nil udp Addrs
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
currentRemote := i.remote
|
currentRemote := i.remote
|
||||||
if !currentRemote.IsValid() {
|
if currentRemote == nil {
|
||||||
i.SetRemote(newRemote)
|
i.SetRemote(newRemote)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -679,13 +596,13 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) b
|
|||||||
// NOTE: We do this loop here instead of calling `isPreferred` in
|
// NOTE: We do this loop here instead of calling `isPreferred` in
|
||||||
// remote_list.go so that we only have to loop over preferredRanges once.
|
// remote_list.go so that we only have to loop over preferredRanges once.
|
||||||
newIsPreferred := false
|
newIsPreferred := false
|
||||||
for _, l := range hm.GetPreferredRanges() {
|
for _, l := range hm.preferredRanges {
|
||||||
// return early if we are already on a preferred remote
|
// return early if we are already on a preferred remote
|
||||||
if l.Contains(currentRemote.Addr()) {
|
if l.Contains(currentRemote.IP) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if l.Contains(newRemote.Addr()) {
|
if l.Contains(newRemote.IP) {
|
||||||
newIsPreferred = true
|
newIsPreferred = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -693,7 +610,7 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) b
|
|||||||
if newIsPreferred {
|
if newIsPreferred {
|
||||||
// Consider this a roaming event
|
// Consider this a roaming event
|
||||||
i.lastRoam = time.Now()
|
i.lastRoam = time.Now()
|
||||||
i.lastRoamRemote = currentRemote
|
i.lastRoamRemote = currentRemote.Copy()
|
||||||
|
|
||||||
i.SetRemote(newRemote)
|
i.SetRemote(newRemote)
|
||||||
|
|
||||||
@@ -703,26 +620,26 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) b
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (i *HostInfo) RecvErrorExceeded() bool {
|
||||||
|
if i.recvError.Add(1) >= maxRecvError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
|
func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
|
||||||
if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
|
if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
|
||||||
// Simple case, no CIDRTree needed
|
// Simple case, no CIDRTree needed
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteCidr := new(bart.Table[struct{}])
|
remoteCidr := cidr.NewTree4[struct{}]()
|
||||||
for _, ip := range c.Details.Ips {
|
for _, ip := range c.Details.Ips {
|
||||||
//TODO: IPV6-WORK what to do when ip is invalid?
|
remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
|
||||||
nip, _ := netip.AddrFromSlice(ip.IP)
|
|
||||||
nip = nip.Unmap()
|
|
||||||
remoteCidr.Insert(netip.PrefixFrom(nip, nip.BitLen()), struct{}{})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, n := range c.Details.Subnets {
|
for _, n := range c.Details.Subnets {
|
||||||
//TODO: IPV6-WORK what to do when ip is invalid?
|
remoteCidr.AddCIDR(n, struct{}{})
|
||||||
nip, _ := netip.AddrFromSlice(n.IP)
|
|
||||||
nip = nip.Unmap()
|
|
||||||
bits, _ := n.Mask.Size()
|
|
||||||
remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
|
|
||||||
}
|
}
|
||||||
i.remoteCidr = remoteCidr
|
i.remoteCidr = remoteCidr
|
||||||
}
|
}
|
||||||
@@ -747,9 +664,9 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
|||||||
|
|
||||||
// Utility functions
|
// Utility functions
|
||||||
|
|
||||||
func localIps(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
|
func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP {
|
||||||
//FIXME: This function is pretty garbage
|
//FIXME: This function is pretty garbage
|
||||||
var ips []netip.Addr
|
var ips []net.IP
|
||||||
ifaces, _ := net.Interfaces()
|
ifaces, _ := net.Interfaces()
|
||||||
for _, i := range ifaces {
|
for _, i := range ifaces {
|
||||||
allow := allowList.AllowName(i.Name)
|
allow := allowList.AllowName(i.Name)
|
||||||
@@ -771,29 +688,20 @@ func localIps(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
|
|||||||
ip = v.IP
|
ip = v.IP
|
||||||
}
|
}
|
||||||
|
|
||||||
nip, ok := netip.AddrFromSlice(ip)
|
|
||||||
if !ok {
|
|
||||||
if l.Level >= logrus.DebugLevel {
|
|
||||||
l.WithField("localIp", ip).Debug("ip was invalid for netip")
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
nip = nip.Unmap()
|
|
||||||
|
|
||||||
//TODO: Filtering out link local for now, this is probably the most correct thing
|
//TODO: Filtering out link local for now, this is probably the most correct thing
|
||||||
//TODO: Would be nice to filter out SLAAC MAC based ips as well
|
//TODO: Would be nice to filter out SLAAC MAC based ips as well
|
||||||
if nip.IsLoopback() == false && nip.IsLinkLocalUnicast() == false {
|
if ip.IsLoopback() == false && !ip.IsLinkLocalUnicast() {
|
||||||
allow := allowList.Allow(nip)
|
allow := allowList.Allow(ip)
|
||||||
if l.Level >= logrus.TraceLevel {
|
if l.Level >= logrus.TraceLevel {
|
||||||
l.WithField("localIp", nip).WithField("allow", allow).Trace("localAllowList.Allow")
|
l.WithField("localIp", ip).WithField("allow", allow).Trace("localAllowList.Allow")
|
||||||
}
|
}
|
||||||
if !allow {
|
if !allow {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
ips = append(ips, nip)
|
ips = append(ips, ip)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return ips
|
return &ips
|
||||||
}
|
}
|
||||||
|
|||||||
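As a hedged aside, not part of the compare: the preferred-ranges change above swaps []netip.Prefix (the `-` side) for []*net.IPNet (the `+` side), and TryPromoteBest only does a containment test against those ranges. A minimal standalone sketch of that membership check in both representations, using an arbitrary example CIDR that is not taken from the diff:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// netip.Prefix form, as on the `-` side of TryPromoteBest.
	pfx := netip.MustParsePrefix("10.1.0.0/16")
	fmt.Println(pfx.Contains(netip.MustParseAddr("10.1.2.3"))) // true

	// *net.IPNet form, as on the `+` side.
	_, ipn, _ := net.ParseCIDR("10.1.0.0/16")
	fmt.Println(ipn.Contains(net.ParseIP("10.1.2.3"))) // true
}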
149  hostmap_test.go
@@ -2,29 +2,29 @@ package nebula

 import (
 "net"
-"net/netip"
 "testing"

-"github.com/slackhq/nebula/cert"
-"github.com/slackhq/nebula/config"
 "github.com/slackhq/nebula/test"
 "github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
 )

 func TestHostMap_MakePrimary(t *testing.T) {
 l := test.NewLogger()
-hm := newHostMap(
+hm := NewHostMap(
 l,
-netip.MustParsePrefix("10.0.0.1/24"),
+&net.IPNet{
+IP: net.IP{10, 0, 0, 1},
+Mask: net.IPMask{255, 255, 255, 0},
+},
+[]*net.IPNet{},
 )

 f := &Interface{}

-h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
-h2 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 2}
-h3 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 3}
-h4 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 4}
+h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
+h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
+h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
+h4 := &HostInfo{vpnIp: 1, localIndexId: 4}

 hm.unlockedAddHostInfo(h4, f)
 hm.unlockedAddHostInfo(h3, f)
@@ -32,7 +32,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
 hm.unlockedAddHostInfo(h1, f)

 // Make sure we go h1 -> h2 -> h3 -> h4
-prim := hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim := hm.QueryVpnIp(1)
 assert.Equal(t, h1.localIndexId, prim.localIndexId)
 assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -47,7 +47,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
 hm.MakePrimary(h3)

 // Make sure we go h3 -> h1 -> h2 -> h4
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h3.localIndexId, prim.localIndexId)
 assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -62,7 +62,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
 hm.MakePrimary(h4)

 // Make sure we go h4 -> h3 -> h1 -> h2
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h4.localIndexId, prim.localIndexId)
 assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -77,7 +77,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
 hm.MakePrimary(h4)

 // Make sure we go h4 -> h3 -> h1 -> h2
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h4.localIndexId, prim.localIndexId)
 assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -89,55 +89,25 @@ func TestHostMap_MakePrimary(t *testing.T) {
 assert.Nil(t, h2.next)
 }

-func TestHostInfo_CreateRemoteCIDR(t *testing.T) {
-h := HostInfo{}
-c := &cert.NebulaCertificate{
-Details: cert.NebulaCertificateDetails{
-Ips: []*net.IPNet{
-{
-IP: net.IPv4(1, 2, 3, 4),
-Mask: net.IPv4Mask(255, 255, 255, 0),
-},
-},
-},
-}
-
-// remoteCidr should be empty with only 1 ip address present in the certificate
-h.CreateRemoteCIDR(c)
-assert.Empty(t, h.remoteCidr)
-
-// remoteCidr should be populated if there is also a subnet in the certificate
-c.Details.Subnets = []*net.IPNet{
-{
-IP: net.IPv4(9, 2, 3, 4),
-Mask: net.IPv4Mask(255, 255, 255, 0),
-},
-}
-h.CreateRemoteCIDR(c)
-assert.NotEmpty(t, h.remoteCidr)
-_, ok := h.remoteCidr.Lookup(netip.MustParseAddr("1.2.3.0"))
-assert.False(t, ok, "An ip address within the certificates network should not be found")
-_, ok = h.remoteCidr.Lookup(netip.MustParseAddr("1.2.3.4"))
-assert.True(t, ok, "An exact ip address match should be found")
-_, ok = h.remoteCidr.Lookup(netip.MustParseAddr("9.2.3.4"))
-assert.True(t, ok, "An ip address within the subnets should be found")
-}
-
 func TestHostMap_DeleteHostInfo(t *testing.T) {
 l := test.NewLogger()
-hm := newHostMap(
+hm := NewHostMap(
 l,
-netip.MustParsePrefix("10.0.0.1/24"),
+&net.IPNet{
+IP: net.IP{10, 0, 0, 1},
+Mask: net.IPMask{255, 255, 255, 0},
+},
+[]*net.IPNet{},
 )

 f := &Interface{}

-h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
-h2 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 2}
-h3 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 3}
-h4 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 4}
-h5 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 5}
-h6 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 6}
+h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
+h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
+h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
+h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
+h5 := &HostInfo{vpnIp: 1, localIndexId: 5}
+h6 := &HostInfo{vpnIp: 1, localIndexId: 6}

 hm.unlockedAddHostInfo(h6, f)
 hm.unlockedAddHostInfo(h5, f)
@@ -153,7 +123,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
 assert.Nil(t, h)

 // Make sure we go h1 -> h2 -> h3 -> h4 -> h5
-prim := hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim := hm.QueryVpnIp(1)
 assert.Equal(t, h1.localIndexId, prim.localIndexId)
 assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -172,7 +142,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
 assert.Nil(t, h1.next)

 // Make sure we go h2 -> h3 -> h4 -> h5
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h2.localIndexId, prim.localIndexId)
 assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -190,7 +160,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
 assert.Nil(t, h3.next)

 // Make sure we go h2 -> h4 -> h5
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h2.localIndexId, prim.localIndexId)
 assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -206,7 +176,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
 assert.Nil(t, h5.next)

 // Make sure we go h2 -> h4
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h2.localIndexId, prim.localIndexId)
 assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
 assert.Nil(t, prim.prev)
@@ -220,7 +190,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
 assert.Nil(t, h2.next)

 // Make sure we only have h4
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Equal(t, h4.localIndexId, prim.localIndexId)
 assert.Nil(t, prim.prev)
 assert.Nil(t, prim.next)
@@ -232,61 +202,6 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
 assert.Nil(t, h4.next)

 // Make sure we have nil
-prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(1)
 assert.Nil(t, prim)
 }

-func TestHostMap_reload(t *testing.T) {
-l := test.NewLogger()
-c := config.NewC(l)
-
-hm := NewHostMapFromConfig(
-l,
-netip.MustParsePrefix("10.0.0.1/24"),
-c,
-)
-
-toS := func(ipn []netip.Prefix) []string {
-var s []string
-for _, n := range ipn {
-s = append(s, n.String())
-}
-return s
-}
-
-assert.Empty(t, hm.GetPreferredRanges())
-
-c.ReloadConfigString("preferred_ranges: [1.1.1.0/24, 10.1.1.0/24]")
-assert.EqualValues(t, []string{"1.1.1.0/24", "10.1.1.0/24"}, toS(hm.GetPreferredRanges()))
-
-c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
-assert.EqualValues(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
-}
-
-func TestHostMap_RelayState(t *testing.T) {
-h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
-a1 := netip.MustParseAddr("::1")
-a2 := netip.MustParseAddr("2001::1")
-
-h1.relayState.InsertRelayTo(a1)
-assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
-h1.relayState.InsertRelayTo(a2)
-assert.Equal(t, h1.relayState.relays, []netip.Addr{a1, a2})
-// Ensure that the first relay added is the first one returned in the copy
-currentRelays := h1.relayState.CopyRelayIps()
-require.Len(t, currentRelays, 2)
-assert.Equal(t, currentRelays[0], a1)
-
-// Deleting the last one in the list works ok
-h1.relayState.DeleteRelay(a2)
-assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
-
-// Deleting an element not in the list works ok
-h1.relayState.DeleteRelay(a2)
-assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
-
-// Deleting the only element in the list works ok
-h1.relayState.DeleteRelay(a1)
-assert.Equal(t, h1.relayState.relays, []netip.Addr{})
-
-}
@@ -5,11 +5,9 @@ package nebula

 // This file contains functions used to export information to the e2e testing framework

-import (
-"net/netip"
-)
+import "github.com/slackhq/nebula/iputil"

-func (i *HostInfo) GetVpnIp() netip.Addr {
+func (i *HostInfo) GetVpnIp() iputil.VpnIp {
 return i.vpnIp
 }

52  inside.go
@@ -1,13 +1,12 @@
 package nebula

 import (
-"net/netip"
-
 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/firewall"
 "github.com/slackhq/nebula/header"
 "github.com/slackhq/nebula/iputil"
 "github.com/slackhq/nebula/noiseutil"
+"github.com/slackhq/nebula/udp"
 )

 func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
@@ -20,11 +19,11 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
 }

 // Ignore local broadcast packets
-if f.dropLocalBroadcast && fwPacket.RemoteIP == f.myBroadcastAddr {
+if f.dropLocalBroadcast && fwPacket.RemoteIP == f.localBroadcast {
 return
 }

-if fwPacket.RemoteIP == f.myVpnNet.Addr() {
+if fwPacket.RemoteIP == f.myVpnIp {
 // Immediately forward packets from self to self.
 // This should only happen on Darwin-based and FreeBSD hosts, which
 // routes packets from the Nebula IP to the Nebula IP through the Nebula
@@ -40,8 +39,8 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
 return
 }

-// Ignore multicast packets
-if f.dropMulticast && fwPacket.RemoteIP.IsMulticast() {
+// Ignore broadcast packets
+if f.dropMulticast && isMulticast(fwPacket.RemoteIP) {
 return
 }

@@ -63,9 +62,9 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
 return
 }

-dropReason := f.firewall.Drop(*fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache)
+dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache)
 if dropReason == nil {
-f.sendNoMetrics(header.Message, 0, hostinfo.ConnectionState, hostinfo, netip.AddrPort{}, packet, nb, out, q)
+f.sendNoMetrics(header.Message, 0, hostinfo.ConnectionState, hostinfo, nil, packet, nb, out, q)

 } else {
 f.rejectInside(packet, out, q)
@@ -114,19 +113,19 @@ func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *
 return
 }

-f.sendNoMetrics(header.Message, 0, ci, hostinfo, netip.AddrPort{}, out, nb, packet, q)
+f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, out, nb, packet, q)
 }

-func (f *Interface) Handshake(vpnIp netip.Addr) {
+func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
 f.getOrHandshake(vpnIp, nil)
 }

 // getOrHandshake returns nil if the vpnIp is not routable.
 // If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
-func (f *Interface) getOrHandshake(vpnIp netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
-if !f.myVpnNet.Contains(vpnIp) {
+func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
+if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) {
 vpnIp = f.inside.RouteFor(vpnIp)
-if !vpnIp.IsValid() {
+if vpnIp == 0 {
 return nil, false
 }
 }
@@ -143,7 +142,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
 }

 // check if packet is in outbound fw rules
-dropReason := f.firewall.Drop(*fp, false, hostinfo, f.pki.GetCAPool(), nil)
+dropReason := f.firewall.Drop(p, *fp, false, hostinfo, f.pki.GetCAPool(), nil)
 if dropReason != nil {
 if f.l.Level >= logrus.DebugLevel {
 f.l.WithField("fwPacket", fp).
@@ -153,11 +152,11 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
 return
 }

-f.sendNoMetrics(header.Message, st, hostinfo.ConnectionState, hostinfo, netip.AddrPort{}, p, nb, out, 0)
+f.sendNoMetrics(header.Message, st, hostinfo.ConnectionState, hostinfo, nil, p, nb, out, 0)
 }

 // SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
-func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte) {
+func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
 hostInfo, ready := f.getOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
 hh.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
 })
@@ -183,10 +182,10 @@ func (f *Interface) SendMessageToHostInfo(t header.MessageType, st header.Messag

 func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) {
 f.messageMetrics.Tx(t, st, 1)
-f.sendNoMetrics(t, st, ci, hostinfo, netip.AddrPort{}, p, nb, out, 0)
+f.sendNoMetrics(t, st, ci, hostinfo, nil, p, nb, out, 0)
 }

-func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote netip.AddrPort, p, nb, out []byte) {
+func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte) {
 f.messageMetrics.Tx(t, st, 1)
 f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
 }
@@ -213,7 +212,7 @@ func (f *Interface) SendVia(via *HostInfo,
 c := via.ConnectionState.messageCounter.Add(1)

 out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
-f.connectionManager.Out(via)
+f.connectionManager.Out(via.localIndexId)

 // Authenticate the header and payload, but do not encrypt for this message type.
 // The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
@@ -256,12 +255,12 @@ func (f *Interface) SendVia(via *HostInfo,
 f.connectionManager.RelayUsed(relay.LocalIndex)
 }

-func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote netip.AddrPort, p, nb, out []byte, q int) {
+func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
 if ci.eKey == nil {
 //TODO: log warning
 return
 }
-useRelay := !remote.IsValid() && !hostinfo.remote.IsValid()
+useRelay := remote == nil && hostinfo.remote == nil
 fullOut := out

 if useRelay {
@@ -282,7 +281,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType

 //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
 out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
-f.connectionManager.Out(hostinfo)
+f.connectionManager.Out(hostinfo.localIndexId)

 // Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
 // all our IPs and enable a faster roaming.
@@ -309,13 +308,13 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
 return
 }

-if remote.IsValid() {
+if remote != nil {
 err = f.writers[q].WriteTo(out, remote)
 if err != nil {
 hostinfo.logger(f.l).WithError(err).
 WithField("udpAddr", remote).Error("Failed to write outgoing packet")
 }
-} else if hostinfo.remote.IsValid() {
+} else if hostinfo.remote != nil {
 err = f.writers[q].WriteTo(out, hostinfo.remote)
 if err != nil {
 hostinfo.logger(f.l).WithError(err).
@@ -335,3 +334,8 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
 }
 }
 }

+func isMulticast(ip iputil.VpnIp) bool {
+// Class D multicast
+return (((ip >> 24) & 0xff) & 0xf0) == 0xe0
+}
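A hedged sketch, not part of the compare, of the class D test behind the isMulticast helper added above: IPv4 multicast is 224.0.0.0/4, so the top four bits of the first octet must be 1110, which is why masking the first octet with 0xf0 and comparing against 0xe0 is enough. The sample addresses below are arbitrary.

package main

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/iputil"
)

// Same expression as the isMulticast helper in the diff above.
func isMulticast(ip iputil.VpnIp) bool {
	return (((ip >> 24) & 0xff) & 0xf0) == 0xe0
}

func main() {
	fmt.Println(isMulticast(iputil.Ip2VpnIp(net.ParseIP("239.255.0.1")))) // true
	fmt.Println(isMulticast(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1"))))   // false
}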
88  interface.go
@@ -2,11 +2,10 @@ package nebula

 import (
 "context"
-"encoding/binary"
 "errors"
 "fmt"
 "io"
-"net/netip"
+"net"
 "os"
 "runtime"
 "sync/atomic"
@@ -17,6 +16,7 @@ import (
 "github.com/slackhq/nebula/config"
 "github.com/slackhq/nebula/firewall"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
 "github.com/slackhq/nebula/overlay"
 "github.com/slackhq/nebula/udp"
 )
@@ -24,23 +24,24 @@ import (
 const mtu = 9001

 type InterfaceConfig struct {
 HostMap *HostMap
 Outside udp.Conn
 Inside overlay.Device
 pki *PKI
 Cipher string
 Firewall *Firewall
 ServeDns bool
 HandshakeManager *HandshakeManager
 lightHouse *LightHouse
-connectionManager *connectionManager
+checkInterval time.Duration
+pendingDeletionInterval time.Duration
 DropLocalBroadcast bool
 DropMulticast bool
 routines int
 MessageMetrics *MessageMetrics
 version string
 relayManager *relayManager
 punchy *Punchy

 tryPromoteEvery uint32
 reQueryEvery uint32
@@ -62,8 +63,8 @@ type Interface struct {
 serveDns bool
 createTime time.Time
 lightHouse *LightHouse
-myBroadcastAddr netip.Addr
-myVpnNet netip.Prefix
+localBroadcast iputil.VpnIp
+myVpnIp iputil.VpnIp
 dropLocalBroadcast bool
 dropMulticast bool
 routines int
@@ -101,9 +102,9 @@ type EncWriter interface {
 out []byte,
 nocopy bool,
 )
-SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte)
+SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte)
 SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte)
-Handshake(vpnIp netip.Addr)
+Handshake(vpnIp iputil.VpnIp)
 }

 type sendRecvErrorConfig uint8
@@ -114,10 +115,10 @@ const (
 sendRecvErrorPrivate
 )

-func (s sendRecvErrorConfig) ShouldSendRecvError(ip netip.AddrPort) bool {
+func (s sendRecvErrorConfig) ShouldSendRecvError(ip net.IP) bool {
 switch s {
 case sendRecvErrorPrivate:
-return ip.Addr().IsPrivate()
+return ip.IsPrivate()
 case sendRecvErrorAlways:
 return true
 case sendRecvErrorNever:
@@ -153,32 +154,9 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 if c.Firewall == nil {
 return nil, errors.New("no firewall rules")
 }
-if c.connectionManager == nil {
-return nil, errors.New("no connection manager")
-}

 certificate := c.pki.GetCertState().Certificate
-myVpnAddr, ok := netip.AddrFromSlice(certificate.Details.Ips[0].IP)
-if !ok {
-return nil, fmt.Errorf("invalid ip address in certificate: %s", certificate.Details.Ips[0].IP)
-}
-
-myVpnMask, ok := netip.AddrFromSlice(certificate.Details.Ips[0].Mask)
-if !ok {
-return nil, fmt.Errorf("invalid ip mask in certificate: %s", certificate.Details.Ips[0].Mask)
-}
-
-myVpnAddr = myVpnAddr.Unmap()
-myVpnMask = myVpnMask.Unmap()
-
-if myVpnAddr.BitLen() != myVpnMask.BitLen() {
-return nil, fmt.Errorf("ip address and mask are different lengths in certificate")
-}
-
-ones, _ := certificate.Details.Ips[0].Mask.Size()
-myVpnNet := netip.PrefixFrom(myVpnAddr, ones)
+myVpnIp := iputil.Ip2VpnIp(certificate.Details.Ips[0].IP)

 ifce := &Interface{
 pki: c.pki,
 hostMap: c.HostMap,
@@ -190,15 +168,15 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 handshakeManager: c.HandshakeManager,
 createTime: time.Now(),
 lightHouse: c.lightHouse,
+localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(certificate.Details.Ips[0].Mask),
 dropLocalBroadcast: c.DropLocalBroadcast,
 dropMulticast: c.DropMulticast,
 routines: c.routines,
 version: c.version,
 writers: make([]udp.Conn, c.routines),
 readers: make([]io.ReadWriteCloser, c.routines),
-myVpnNet: myVpnNet,
+myVpnIp: myVpnIp,
 relayManager: c.relayManager,
-connectionManager: c.connectionManager,

 conntrackCacheTimeout: c.ConntrackCacheTimeout,

@@ -212,17 +190,11 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 l: c.l,
 }

-if myVpnAddr.Is4() {
-addr := myVpnNet.Masked().Addr().As4()
-binary.BigEndian.PutUint32(addr[:], binary.BigEndian.Uint32(addr[:])|^binary.BigEndian.Uint32(certificate.Details.Ips[0].Mask))
-ifce.myBroadcastAddr = netip.AddrFrom4(addr)
-}
-
 ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
 ifce.reQueryEvery.Store(c.reQueryEvery)
 ifce.reQueryWait.Store(int64(c.reQueryWait))

-ifce.connectionManager.intf = ifce
+ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)

 return ifce, nil
 }
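Both sides of NewInterface above derive the node's directed-broadcast address from the certificate IP and mask: the `+` side as myVpnIp | ^iputil.Ip2VpnIp(mask), the `-` side with encoding/binary on the masked netip prefix. A hedged sketch of the same arithmetic on an arbitrary example subnet, not taken from the diff:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// Directed broadcast for 10.42.7.9/24, computed uint32-style: ip | ^mask.
	_, ipn, _ := net.ParseCIDR("10.42.7.9/24")
	ip := binary.BigEndian.Uint32(net.ParseIP("10.42.7.9").To4())
	mask := binary.BigEndian.Uint32(ipn.Mask)

	var b [4]byte
	binary.BigEndian.PutUint32(b[:], ip|^mask)
	fmt.Println(netip.AddrFrom4(b)) // 10.42.7.255
}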
@@ -6,8 +6,6 @@ import (
 "golang.org/x/net/ipv4"
 )

-//TODO: IPV6-WORK can probably delete this
-
 const (
 // Need 96 bytes for the largest reject packet:
 // - 20 byte ipv4 header
93  iputil/util.go (new file)
@@ -0,0 +1,93 @@
+package iputil
+
+import (
+"encoding/binary"
+"fmt"
+"net"
+"net/netip"
+)
+
+type VpnIp uint32
+
+const maxIPv4StringLen = len("255.255.255.255")
+
+func (ip VpnIp) String() string {
+b := make([]byte, maxIPv4StringLen)
+
+n := ubtoa(b, 0, byte(ip>>24))
+b[n] = '.'
+n++
+
+n += ubtoa(b, n, byte(ip>>16&255))
+b[n] = '.'
+n++
+
+n += ubtoa(b, n, byte(ip>>8&255))
+b[n] = '.'
+n++
+
+n += ubtoa(b, n, byte(ip&255))
+return string(b[:n])
+}
+
+func (ip VpnIp) MarshalJSON() ([]byte, error) {
+return []byte(fmt.Sprintf("\"%s\"", ip.String())), nil
+}
+
+func (ip VpnIp) ToIP() net.IP {
+nip := make(net.IP, 4)
+binary.BigEndian.PutUint32(nip, uint32(ip))
+return nip
+}
+
+func (ip VpnIp) ToNetIpAddr() netip.Addr {
+var nip [4]byte
+binary.BigEndian.PutUint32(nip[:], uint32(ip))
+return netip.AddrFrom4(nip)
+}
+
+func Ip2VpnIp(ip []byte) VpnIp {
+if len(ip) == 16 {
+return VpnIp(binary.BigEndian.Uint32(ip[12:16]))
+}
+return VpnIp(binary.BigEndian.Uint32(ip))
+}
+
+func ToNetIpAddr(ip net.IP) (netip.Addr, error) {
+addr, ok := netip.AddrFromSlice(ip)
+if !ok {
+return netip.Addr{}, fmt.Errorf("invalid net.IP: %v", ip)
+}
+return addr, nil
+}
+
+func ToNetIpPrefix(ipNet net.IPNet) (netip.Prefix, error) {
+addr, err := ToNetIpAddr(ipNet.IP)
+if err != nil {
+return netip.Prefix{}, err
+}
+ones, bits := ipNet.Mask.Size()
+if ones == 0 && bits == 0 {
+return netip.Prefix{}, fmt.Errorf("invalid net.IP: %v", ipNet)
+}
+return netip.PrefixFrom(addr, ones), nil
+}
+
+// ubtoa encodes the string form of the integer v to dst[start:] and
+// returns the number of bytes written to dst. The caller must ensure
+// that dst has sufficient length.
+func ubtoa(dst []byte, start int, v byte) int {
+if v < 10 {
+dst[start] = v + '0'
+return 1
+} else if v < 100 {
+dst[start+1] = v%10 + '0'
+dst[start] = v/10 + '0'
+return 2
+}
+
+dst[start+2] = v%10 + '0'
+dst[start+1] = (v/10)%10 + '0'
+dst[start] = v/100 + '0'
+return 3
+}
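A short usage sketch, not part of the compare, for the VpnIp helpers introduced in iputil/util.go above; the address is an arbitrary example:

package main

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/iputil"
)

func main() {
	vpnIp := iputil.Ip2VpnIp(net.ParseIP("192.168.100.1")) // big-endian uint32
	fmt.Println(vpnIp.String())      // "192.168.100.1"
	fmt.Println(vpnIp.ToIP())        // net.IP form
	fmt.Println(vpnIp.ToNetIpAddr()) // netip.Addr form
}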
17  iputil/util_test.go (new file)
@@ -0,0 +1,17 @@
+package iputil
+
+import (
+"net"
+"testing"
+
+"github.com/stretchr/testify/assert"
+)
+
+func TestVpnIp_String(t *testing.T) {
+assert.Equal(t, "255.255.255.255", Ip2VpnIp(net.ParseIP("255.255.255.255")).String())
+assert.Equal(t, "1.255.255.255", Ip2VpnIp(net.ParseIP("1.255.255.255")).String())
+assert.Equal(t, "1.1.255.255", Ip2VpnIp(net.ParseIP("1.1.255.255")).String())
+assert.Equal(t, "1.1.1.255", Ip2VpnIp(net.ParseIP("1.1.1.255")).String())
+assert.Equal(t, "1.1.1.1", Ip2VpnIp(net.ParseIP("1.1.1.1")).String())
+assert.Equal(t, "0.0.0.0", Ip2VpnIp(net.ParseIP("0.0.0.0")).String())
+}
400  lighthouse.go
@@ -7,16 +7,16 @@ import (
 "fmt"
 "net"
 "net/netip"
-"strconv"
 "sync"
 "sync/atomic"
 "time"

-"github.com/gaissmai/bart"
 "github.com/rcrowley/go-metrics"
 "github.com/sirupsen/logrus"
+"github.com/slackhq/nebula/cidr"
 "github.com/slackhq/nebula/config"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/iputil"
 "github.com/slackhq/nebula/udp"
 "github.com/slackhq/nebula/util"
 )
@@ -26,18 +26,25 @@ import (

 var ErrHostNotKnown = errors.New("host not known")

+type netIpAndPort struct {
+ip net.IP
+port uint16
+}
+
 type LightHouse struct {
 //TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
 sync.RWMutex //Because we concurrently read and write to our maps
 ctx context.Context
 amLighthouse bool
-myVpnNet netip.Prefix
+myVpnIp iputil.VpnIp
+myVpnZeros iputil.VpnIp
+myVpnNet *net.IPNet
 punchConn udp.Conn
 punchy *Punchy

 // Local cache of answers from light houses
 // map of vpn Ip to answers
-addrMap map[netip.Addr]*RemoteList
+addrMap map[iputil.VpnIp]*RemoteList

 // filters remote addresses allowed for each host
 // - When we are a lighthouse, this filters what addresses we store and
@@ -50,26 +57,26 @@ type LightHouse struct {
 localAllowList atomic.Pointer[LocalAllowList]

 // used to trigger the HandshakeManager when we receive HostQueryReply
-handshakeTrigger chan<- netip.Addr
+handshakeTrigger chan<- iputil.VpnIp

 // staticList exists to avoid having a bool in each addrMap entry
 // since static should be rare
-staticList atomic.Pointer[map[netip.Addr]struct{}]
-lighthouses atomic.Pointer[map[netip.Addr]struct{}]
+staticList atomic.Pointer[map[iputil.VpnIp]struct{}]
+lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]

 interval atomic.Int64
 updateCancel context.CancelFunc
 ifce EncWriter
 nebulaPort uint32 // 32 bits because protobuf does not have a uint16

-advertiseAddrs atomic.Pointer[[]netip.AddrPort]
+advertiseAddrs atomic.Pointer[[]netIpAndPort]

 // IP's of relays that can be used by peers to access me
-relaysForMe atomic.Pointer[[]netip.Addr]
+relaysForMe atomic.Pointer[[]iputil.VpnIp]

-queryChan chan netip.Addr
+queryChan chan iputil.VpnIp

-calculatedRemotes atomic.Pointer[bart.Table[[]*calculatedRemote]] // Maps VpnIp to []*calculatedRemote
+calculatedRemotes atomic.Pointer[cidr.Tree4[[]*calculatedRemote]] // Maps VpnIp to []*calculatedRemote

 metrics *MessageMetrics
 metricHolepunchTx metrics.Counter
@@ -78,7 +85,7 @@ type LightHouse struct {

 // NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
 // addrMap should be nil unless this is during a config reload
-func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet netip.Prefix, pc udp.Conn, p *Punchy) (*LightHouse, error) {
+func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc udp.Conn, p *Punchy) (*LightHouse, error) {
 amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
 nebulaPort := uint32(c.GetInt("listen.port", 0))
 if amLighthouse && nebulaPort == 0 {
@@ -91,23 +98,26 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
 if err != nil {
 return nil, util.NewContextualError("Failed to get listening port", nil, err)
 }
-nebulaPort = uint32(uPort.Port())
+nebulaPort = uint32(uPort.Port)
 }

+ones, _ := myVpnNet.Mask.Size()
 h := LightHouse{
 ctx: ctx,
 amLighthouse: amLighthouse,
+myVpnIp: iputil.Ip2VpnIp(myVpnNet.IP),
+myVpnZeros: iputil.VpnIp(32 - ones),
 myVpnNet: myVpnNet,
-addrMap: make(map[netip.Addr]*RemoteList),
+addrMap: make(map[iputil.VpnIp]*RemoteList),
 nebulaPort: nebulaPort,
 punchConn: pc,
 punchy: p,
-queryChan: make(chan netip.Addr, c.GetUint32("handshakes.query_buffer", 64)),
+queryChan: make(chan iputil.VpnIp, c.GetUint32("handshakes.query_buffer", 64)),
 l: l,
 }
-lighthouses := make(map[netip.Addr]struct{})
+lighthouses := make(map[iputil.VpnIp]struct{})
 h.lighthouses.Store(&lighthouses)
-staticList := make(map[netip.Addr]struct{})
+staticList := make(map[iputil.VpnIp]struct{})
 h.staticList.Store(&staticList)

 if c.GetBool("stats.lighthouse_metrics", false) {
@@ -137,11 +147,11 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
 return &h, nil
 }

-func (lh *LightHouse) GetStaticHostList() map[netip.Addr]struct{} {
+func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
 return *lh.staticList.Load()
 }

-func (lh *LightHouse) GetLighthouses() map[netip.Addr]struct{} {
+func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
 return *lh.lighthouses.Load()
 }

@@ -153,15 +163,15 @@ func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
 return lh.localAllowList.Load()
 }

-func (lh *LightHouse) GetAdvertiseAddrs() []netip.AddrPort {
+func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort {
 return *lh.advertiseAddrs.Load()
 }

-func (lh *LightHouse) GetRelaysForMe() []netip.Addr {
+func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
 return *lh.relaysForMe.Load()
 }

-func (lh *LightHouse) getCalculatedRemotes() *bart.Table[[]*calculatedRemote] {
+func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4[[]*calculatedRemote] {
 return lh.calculatedRemotes.Load()
 }

@@ -172,40 +182,25 @@ func (lh *LightHouse) GetUpdateInterval() int64 {
 func (lh *LightHouse) reload(c *config.C, initial bool) error {
 if initial || c.HasChanged("lighthouse.advertise_addrs") {
 rawAdvAddrs := c.GetStringSlice("lighthouse.advertise_addrs", []string{})
-advAddrs := make([]netip.AddrPort, 0)
+advAddrs := make([]netIpAndPort, 0)

 for i, rawAddr := range rawAdvAddrs {
-host, sport, err := net.SplitHostPort(rawAddr)
+fIp, fPort, err := udp.ParseIPAndPort(rawAddr)
 if err != nil {
 return util.NewContextualError("Unable to parse lighthouse.advertise_addrs entry", m{"addr": rawAddr, "entry": i + 1}, err)
 }

-ips, err := net.DefaultResolver.LookupNetIP(context.Background(), "ip", host)
-if err != nil {
-return util.NewContextualError("Unable to lookup lighthouse.advertise_addrs entry", m{"addr": rawAddr, "entry": i + 1}, err)
-}
-if len(ips) == 0 {
-return util.NewContextualError("Unable to lookup lighthouse.advertise_addrs entry", m{"addr": rawAddr, "entry": i + 1}, nil)
+if fPort == 0 {
+fPort = uint16(lh.nebulaPort)
 }

-port, err := strconv.Atoi(sport)
-if err != nil {
-return util.NewContextualError("Unable to parse port in lighthouse.advertise_addrs entry", m{"addr": rawAddr, "entry": i + 1}, err)
-}
-
-if port == 0 {
-port = int(lh.nebulaPort)
-}
-
-//TODO: we could technically insert all returned ips instead of just the first one if a dns lookup was used
-ip := ips[0].Unmap()
-if lh.myVpnNet.Contains(ip) {
+if ip4 := fIp.To4(); ip4 != nil && lh.myVpnNet.Contains(fIp) {
 lh.l.WithField("addr", rawAddr).WithField("entry", i+1).
 Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range")
 continue
 }

-advAddrs = append(advAddrs, netip.AddrPortFrom(ip, uint16(port)))
+advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort})
 }

 lh.advertiseAddrs.Store(&advAddrs)
@@ -283,8 +278,8 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 lh.RUnlock()
 }
 // Build a new list based on current config.
-staticList := make(map[netip.Addr]struct{})
-err := lh.loadStaticMap(c, staticList)
+staticList := make(map[iputil.VpnIp]struct{})
+err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
 if err != nil {
 return err
 }
@@ -308,8 +303,8 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 }

 if initial || c.HasChanged("lighthouse.hosts") {
-lhMap := make(map[netip.Addr]struct{})
-err := lh.parseLighthouses(c, lhMap)
+lhMap := make(map[iputil.VpnIp]struct{})
+err := lh.parseLighthouses(c, lh.myVpnNet, lhMap)
 if err != nil {
 return err
 }
@@ -328,17 +323,16 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 if len(c.GetStringSlice("relay.relays", nil)) > 0 {
 lh.l.Info("Ignoring relays from config because am_relay is true")
 }
-relaysForMe := []netip.Addr{}
+relaysForMe := []iputil.VpnIp{}
 lh.relaysForMe.Store(&relaysForMe)
 case false:
-relaysForMe := []netip.Addr{}
+relaysForMe := []iputil.VpnIp{}
 for _, v := range c.GetStringSlice("relay.relays", nil) {
 lh.l.WithField("relay", v).Info("Read relay from config")

-configRIP, err := netip.ParseAddr(v)
-//TODO: We could print the error here
-if err == nil {
-relaysForMe = append(relaysForMe, configRIP)
+configRIP := net.ParseIP(v)
+if configRIP != nil {
+relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP))
 }
 }
 lh.relaysForMe.Store(&relaysForMe)
@@ -348,21 +342,21 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
 return nil
 }

-func (lh *LightHouse) parseLighthouses(c *config.C, lhMap map[netip.Addr]struct{}) error {
+func (lh *LightHouse) parseLighthouses(c *config.C, tunCidr *net.IPNet, lhMap map[iputil.VpnIp]struct{}) error {
 lhs := c.GetStringSlice("lighthouse.hosts", []string{})
 if lh.amLighthouse && len(lhs) != 0 {
 lh.l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
 }

 for i, host := range lhs {
-ip, err := netip.ParseAddr(host)
+ip := net.ParseIP(host)
-if err != nil {
+if ip == nil {
-return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
|
return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, nil)
|
||||||
}
|
}
|
||||||
if !lh.myVpnNet.Contains(ip) {
|
if !tunCidr.Contains(ip) {
|
||||||
return util.NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": lh.myVpnNet}, nil)
|
return util.NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": tunCidr.String()}, nil)
|
||||||
}
|
}
|
||||||
lhMap[ip] = struct{}{}
|
lhMap[iputil.Ip2VpnIp(ip)] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !lh.amLighthouse && len(lhMap) == 0 {
|
if !lh.amLighthouse && len(lhMap) == 0 {
|
||||||
@@ -405,7 +399,7 @@ func getStaticMapNetwork(c *config.C) (string, error) {
|
|||||||
return network, nil
|
return network, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struct{}) error {
|
func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
|
||||||
d, err := getStaticMapCadence(c)
|
d, err := getStaticMapCadence(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -416,7 +410,7 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
lookupTimeout, err := getStaticMapLookupTimeout(c)
|
lookup_timeout, err := getStaticMapLookupTimeout(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -425,15 +419,16 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
|
|||||||
i := 0
|
i := 0
|
||||||
|
|
||||||
for k, v := range shm {
|
for k, v := range shm {
|
||||||
vpnIp, err := netip.ParseAddr(fmt.Sprintf("%v", k))
|
rip := net.ParseIP(fmt.Sprintf("%v", k))
|
||||||
if err != nil {
|
if rip == nil {
|
||||||
return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, err)
|
return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !lh.myVpnNet.Contains(vpnIp) {
|
if !tunCidr.Contains(rip) {
|
||||||
return util.NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": vpnIp, "network": lh.myVpnNet, "entry": i + 1}, nil)
|
return util.NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": rip, "network": tunCidr.String(), "entry": i + 1}, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vpnIp := iputil.Ip2VpnIp(rip)
|
||||||
vals, ok := v.([]interface{})
|
vals, ok := v.([]interface{})
|
||||||
if !ok {
|
if !ok {
|
||||||
vals = []interface{}{v}
|
vals = []interface{}{v}
|
||||||
@@ -443,7 +438,7 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
|
|||||||
remoteAddrs = append(remoteAddrs, fmt.Sprintf("%v", v))
|
remoteAddrs = append(remoteAddrs, fmt.Sprintf("%v", v))
|
||||||
}
|
}
|
||||||
|
|
||||||
err = lh.addStaticRemotes(i, d, network, lookupTimeout, vpnIp, remoteAddrs, staticList)
|
err := lh.addStaticRemotes(i, d, network, lookup_timeout, vpnIp, remoteAddrs, staticList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -453,7 +448,7 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) Query(ip netip.Addr) *RemoteList {
|
func (lh *LightHouse) Query(ip iputil.VpnIp) *RemoteList {
|
||||||
if !lh.IsLighthouseIP(ip) {
|
if !lh.IsLighthouseIP(ip) {
|
||||||
lh.QueryServer(ip)
|
lh.QueryServer(ip)
|
||||||
}
|
}
|
||||||
@@ -467,7 +462,7 @@ func (lh *LightHouse) Query(ip netip.Addr) *RemoteList {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// QueryServer is asynchronous so no reply should be expected
|
// QueryServer is asynchronous so no reply should be expected
|
||||||
func (lh *LightHouse) QueryServer(ip netip.Addr) {
|
func (lh *LightHouse) QueryServer(ip iputil.VpnIp) {
|
||||||
// Don't put lighthouse ips in the query channel because we can't query lighthouses about lighthouses
|
// Don't put lighthouse ips in the query channel because we can't query lighthouses about lighthouses
|
||||||
if lh.amLighthouse || lh.IsLighthouseIP(ip) {
|
if lh.amLighthouse || lh.IsLighthouseIP(ip) {
|
||||||
return
|
return
|
||||||
@@ -476,7 +471,7 @@ func (lh *LightHouse) QueryServer(ip netip.Addr) {
|
|||||||
lh.queryChan <- ip
|
lh.queryChan <- ip
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) QueryCache(ip netip.Addr) *RemoteList {
|
func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
|
||||||
lh.RLock()
|
lh.RLock()
|
||||||
if v, ok := lh.addrMap[ip]; ok {
|
if v, ok := lh.addrMap[ip]; ok {
|
||||||
lh.RUnlock()
|
lh.RUnlock()
|
||||||
@@ -493,7 +488,7 @@ func (lh *LightHouse) QueryCache(ip netip.Addr) *RemoteList {
|
|||||||
// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
|
// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
|
||||||
// details from the remote list. It looks for a hit in the addrMap and a hit in the RemoteList under the owner vpnIp
|
// details from the remote list. It looks for a hit in the addrMap and a hit in the RemoteList under the owner vpnIp
|
||||||
// If one is found then f() is called with proper locking, f() must return result of n.MarshalTo()
|
// If one is found then f() is called with proper locking, f() must return result of n.MarshalTo()
|
||||||
func (lh *LightHouse) queryAndPrepMessage(vpnIp netip.Addr, f func(*cache) (int, error)) (bool, int, error) {
|
func (lh *LightHouse) queryAndPrepMessage(vpnIp iputil.VpnIp, f func(*cache) (int, error)) (bool, int, error) {
|
||||||
lh.RLock()
|
lh.RLock()
|
||||||
// Do we have an entry in the main cache?
|
// Do we have an entry in the main cache?
|
||||||
if v, ok := lh.addrMap[vpnIp]; ok {
|
if v, ok := lh.addrMap[vpnIp]; ok {
|
||||||
@@ -516,7 +511,7 @@ func (lh *LightHouse) queryAndPrepMessage(vpnIp netip.Addr, f func(*cache) (int,
|
|||||||
return false, 0, nil
|
return false, 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) DeleteVpnIp(vpnIp netip.Addr) {
|
func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
|
||||||
// First we check the static mapping
|
// First we check the static mapping
|
||||||
// and do nothing if it is there
|
// and do nothing if it is there
|
||||||
if _, ok := lh.GetStaticHostList()[vpnIp]; ok {
|
if _, ok := lh.GetStaticHostList()[vpnIp]; ok {
|
||||||
@@ -537,7 +532,7 @@ func (lh *LightHouse) DeleteVpnIp(vpnIp netip.Addr) {
|
|||||||
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
|
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
|
||||||
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
|
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
|
||||||
// NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
|
// NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
|
||||||
func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, timeout time.Duration, vpnIp netip.Addr, toAddrs []string, staticList map[netip.Addr]struct{}) error {
|
func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, timeout time.Duration, vpnIp iputil.VpnIp, toAddrs []string, staticList map[iputil.VpnIp]struct{}) error {
|
||||||
lh.Lock()
|
lh.Lock()
|
||||||
am := lh.unlockedGetRemoteList(vpnIp)
|
am := lh.unlockedGetRemoteList(vpnIp)
|
||||||
am.Lock()
|
am.Lock()
|
||||||
@@ -558,14 +553,20 @@ func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, t
|
|||||||
am.unlockedSetHostnamesResults(hr)
|
am.unlockedSetHostnamesResults(hr)
|
||||||
|
|
||||||
for _, addrPort := range hr.GetIPs() {
|
for _, addrPort := range hr.GetIPs() {
|
||||||
if !lh.shouldAdd(vpnIp, addrPort.Addr()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch {
|
switch {
|
||||||
case addrPort.Addr().Is4():
|
case addrPort.Addr().Is4():
|
||||||
am.unlockedPrependV4(lh.myVpnNet.Addr(), NewIp4AndPortFromNetIP(addrPort.Addr(), addrPort.Port()))
|
to := NewIp4AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
|
||||||
|
if !lh.unlockedShouldAddV4(vpnIp, to) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
am.unlockedPrependV4(lh.myVpnIp, to)
|
||||||
case addrPort.Addr().Is6():
|
case addrPort.Addr().Is6():
|
||||||
am.unlockedPrependV6(lh.myVpnNet.Addr(), NewIp6AndPortFromNetIP(addrPort.Addr(), addrPort.Port()))
|
to := NewIp6AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
|
||||||
|
if !lh.unlockedShouldAddV6(vpnIp, to) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
am.unlockedPrependV6(lh.myVpnIp, to)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -577,12 +578,12 @@ func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, t
|
|||||||
// addCalculatedRemotes adds any calculated remotes based on the
|
// addCalculatedRemotes adds any calculated remotes based on the
|
||||||
// lighthouse.calculated_remotes configuration. It returns true if any
|
// lighthouse.calculated_remotes configuration. It returns true if any
|
||||||
// calculated remotes were added
|
// calculated remotes were added
|
||||||
func (lh *LightHouse) addCalculatedRemotes(vpnIp netip.Addr) bool {
|
func (lh *LightHouse) addCalculatedRemotes(vpnIp iputil.VpnIp) bool {
|
||||||
tree := lh.getCalculatedRemotes()
|
tree := lh.getCalculatedRemotes()
|
||||||
if tree == nil {
|
if tree == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
calculatedRemotes, ok := tree.Lookup(vpnIp)
|
ok, calculatedRemotes := tree.MostSpecificContains(vpnIp)
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -601,13 +602,13 @@ func (lh *LightHouse) addCalculatedRemotes(vpnIp netip.Addr) bool {
|
|||||||
defer am.Unlock()
|
defer am.Unlock()
|
||||||
lh.Unlock()
|
lh.Unlock()
|
||||||
|
|
||||||
am.unlockedSetV4(lh.myVpnNet.Addr(), vpnIp, calculated, lh.unlockedShouldAddV4)
|
am.unlockedSetV4(lh.myVpnIp, vpnIp, calculated, lh.unlockedShouldAddV4)
|
||||||
|
|
||||||
return len(calculated) > 0
|
return len(calculated) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// unlockedGetRemoteList assumes you have the lh lock
|
// unlockedGetRemoteList assumes you have the lh lock
|
||||||
func (lh *LightHouse) unlockedGetRemoteList(vpnIp netip.Addr) *RemoteList {
|
func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
|
||||||
am, ok := lh.addrMap[vpnIp]
|
am, ok := lh.addrMap[vpnIp]
|
||||||
if !ok {
|
if !ok {
|
||||||
am = NewRemoteList(func(a netip.Addr) bool { return lh.shouldAdd(vpnIp, a) })
|
am = NewRemoteList(func(a netip.Addr) bool { return lh.shouldAdd(vpnIp, a) })
|
||||||
@@ -616,27 +617,44 @@ func (lh *LightHouse) unlockedGetRemoteList(vpnIp netip.Addr) *RemoteList {
|
|||||||
return am
|
return am
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lh *LightHouse) shouldAdd(vpnIp netip.Addr, to netip.Addr) bool {
|
func (lh *LightHouse) shouldAdd(vpnIp iputil.VpnIp, to netip.Addr) bool {
|
||||||
allow := lh.GetRemoteAllowList().Allow(vpnIp, to)
|
switch {
|
||||||
if lh.l.Level >= logrus.TraceLevel {
|
case to.Is4():
|
||||||
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
ipBytes := to.As4()
|
||||||
}
|
ip := iputil.Ip2VpnIp(ipBytes[:])
|
||||||
if !allow || lh.myVpnNet.Contains(to) {
|
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, ip)
|
||||||
return false
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
}
|
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
|
}
|
||||||
|
if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, ip) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case to.Is6():
|
||||||
|
ipBytes := to.As16()
|
||||||
|
|
||||||
|
hi := binary.BigEndian.Uint64(ipBytes[:8])
|
||||||
|
lo := binary.BigEndian.Uint64(ipBytes[8:])
|
||||||
|
allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, hi, lo)
|
||||||
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
|
lh.l.WithField("remoteIp", to).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't check our vpn network here because nebula does not support ipv6 on the inside
|
||||||
|
if !allow {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// unlockedShouldAddV4 checks if to is allowed by our allow list
|
// unlockedShouldAddV4 checks if to is allowed by our allow list
|
||||||
func (lh *LightHouse) unlockedShouldAddV4(vpnIp netip.Addr, to *Ip4AndPort) bool {
|
func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
|
||||||
ip := AddrPortFromIp4AndPort(to)
|
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
|
||||||
allow := lh.GetRemoteAllowList().Allow(vpnIp, ip.Addr())
|
|
||||||
if lh.l.Level >= logrus.TraceLevel {
|
if lh.l.Level >= logrus.TraceLevel {
|
||||||
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !allow || lh.myVpnNet.Contains(ip.Addr()) {
|
if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.VpnIp(to.Ip)) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
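Note: the IPv6 branch of shouldAdd above packs the 16-byte address into two big-endian uint64 halves (hi/lo) before consulting the allow list. A minimal, self-contained sketch of that packing and why it is lossless; this is illustrative only, the program and variable names are not part of the diff:

package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

func main() {
	addr := netip.MustParseAddr("2001:db8::1")
	b := addr.As16()

	// Same split as the diff: high and low 8 bytes as big-endian uint64s.
	hi := binary.BigEndian.Uint64(b[:8])
	lo := binary.BigEndian.Uint64(b[8:])

	// Reassemble to confirm the round trip loses nothing.
	var back [16]byte
	binary.BigEndian.PutUint64(back[:8], hi)
	binary.BigEndian.PutUint64(back[8:], lo)
	fmt.Println(addr == netip.AddrFrom16(back)) // true
}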
@@ -644,14 +662,14 @@ func (lh *LightHouse) unlockedShouldAddV4(vpnIp netip.Addr, to *Ip4AndPort) bool
}

// unlockedShouldAddV6 checks if to is allowed by our allow list
-func (lh *LightHouse) unlockedShouldAddV6(vpnIp netip.Addr, to *Ip6AndPort) bool {
-ip := AddrPortFromIp6AndPort(to)
-allow := lh.GetRemoteAllowList().Allow(vpnIp, ip.Addr())
+func (lh *LightHouse) unlockedShouldAddV6(vpnIp iputil.VpnIp, to *Ip6AndPort) bool {
+allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, to.Hi, to.Lo)
if lh.l.Level >= logrus.TraceLevel {
lh.l.WithField("remoteIp", lhIp6ToIp(to)).WithField("allow", allow).Trace("remoteAllowList.Allow")
}

-if !allow || lh.myVpnNet.Contains(ip.Addr()) {
+// We don't check our vpn network here because nebula does not support ipv6 on the inside
+if !allow {
return false
}

@@ -665,39 +683,26 @@ func lhIp6ToIp(v *Ip6AndPort) net.IP {
return ip
}

-func (lh *LightHouse) IsLighthouseIP(vpnIp netip.Addr) bool {
+func (lh *LightHouse) IsLighthouseIP(vpnIp iputil.VpnIp) bool {
if _, ok := lh.GetLighthouses()[vpnIp]; ok {
return true
}
return false
}

-func NewLhQueryByInt(vpnIp netip.Addr) *NebulaMeta {
-if vpnIp.Is6() {
-//TODO: need to support ipv6
-panic("ipv6 is not yet supported")
-}
+func NewLhQueryByInt(VpnIp iputil.VpnIp) *NebulaMeta {

-b := vpnIp.As4()
return &NebulaMeta{
Type: NebulaMeta_HostQuery,
Details: &NebulaMetaDetails{
-VpnIp: binary.BigEndian.Uint32(b[:]),
+VpnIp: uint32(VpnIp),
},
}
}

-func AddrPortFromIp4AndPort(ip *Ip4AndPort) netip.AddrPort {
-b := [4]byte{}
-binary.BigEndian.PutUint32(b[:], ip.Ip)
-return netip.AddrPortFrom(netip.AddrFrom4(b), uint16(ip.Port))
-}
+func NewIp4AndPort(ip net.IP, port uint32) *Ip4AndPort {
+ipp := Ip4AndPort{Port: port}
+ipp.Ip = uint32(iputil.Ip2VpnIp(ip))
+return &ipp

-func AddrPortFromIp6AndPort(ip *Ip6AndPort) netip.AddrPort {
-b := [16]byte{}
-binary.BigEndian.PutUint64(b[:8], ip.Hi)
-binary.BigEndian.PutUint64(b[8:], ip.Lo)
-return netip.AddrPortFrom(netip.AddrFrom16(b), uint16(ip.Port))
}

func NewIp4AndPortFromNetIP(ip netip.Addr, port uint16) *Ip4AndPort {
@@ -708,7 +713,14 @@ func NewIp4AndPortFromNetIP(ip netip.Addr, port uint16) *Ip4AndPort {
}
}

-// TODO: IPV6-WORK we can delete some more of these
+func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
+return &Ip6AndPort{
+Hi: binary.BigEndian.Uint64(ip[:8]),
+Lo: binary.BigEndian.Uint64(ip[8:]),
+Port: port,
+}
+}

func NewIp6AndPortFromNetIP(ip netip.Addr, port uint16) *Ip6AndPort {
ip6Addr := ip.As16()
return &Ip6AndPort{
@@ -717,6 +729,17 @@ func NewIp6AndPortFromNetIP(ip netip.Addr, port uint16) *Ip6AndPort {
Port: uint32(port),
}
}
+func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr {
+ip := ipp.Ip
+return udp.NewAddr(
+net.IPv4(byte(ip&0xff000000>>24), byte(ip&0x00ff0000>>16), byte(ip&0x0000ff00>>8), byte(ip&0x000000ff)),
+uint16(ipp.Port),
+)
+}

+func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
+return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
+}

func (lh *LightHouse) startQueryWorker() {
if lh.amLighthouse {
@@ -738,7 +761,7 @@ func (lh *LightHouse) startQueryWorker() {
}()
}

-func (lh *LightHouse) innerQueryServer(ip netip.Addr, nb, out []byte) {
+func (lh *LightHouse) innerQueryServer(ip iputil.VpnIp, nb, out []byte) {
if lh.IsLighthouseIP(ip) {
return
}
@@ -789,41 +812,36 @@ func (lh *LightHouse) SendUpdate() {
var v6 []*Ip6AndPort

for _, e := range lh.GetAdvertiseAddrs() {
-if e.Addr().Is4() {
-v4 = append(v4, NewIp4AndPortFromNetIP(e.Addr(), e.Port()))
+if ip := e.ip.To4(); ip != nil {
+v4 = append(v4, NewIp4AndPort(e.ip, uint32(e.port)))
} else {
-v6 = append(v6, NewIp6AndPortFromNetIP(e.Addr(), e.Port()))
+v6 = append(v6, NewIp6AndPort(e.ip, uint32(e.port)))
}
}

lal := lh.GetLocalAllowList()
-for _, e := range localIps(lh.l, lal) {
-if lh.myVpnNet.Contains(e) {
+for _, e := range *localIps(lh.l, lal) {
+if ip4 := e.To4(); ip4 != nil && ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.Ip2VpnIp(ip4)) {
continue
}

// Only add IPs that aren't my VPN/tun IP
-if e.Is4() {
-v4 = append(v4, NewIp4AndPortFromNetIP(e, uint16(lh.nebulaPort)))
+if ip := e.To4(); ip != nil {
+v4 = append(v4, NewIp4AndPort(e, lh.nebulaPort))
} else {
-v6 = append(v6, NewIp6AndPortFromNetIP(e, uint16(lh.nebulaPort)))
+v6 = append(v6, NewIp6AndPort(e, lh.nebulaPort))
}
}

var relays []uint32
for _, r := range lh.GetRelaysForMe() {
-//TODO: IPV6-WORK both relays and vpnip need ipv6 support
-b := r.As4()
-relays = append(relays, binary.BigEndian.Uint32(b[:]))
+relays = append(relays, (uint32)(r))
}

-//TODO: IPV6-WORK both relays and vpnip need ipv6 support
-b := lh.myVpnNet.Addr().As4()

m := &NebulaMeta{
Type: NebulaMeta_HostUpdateNotification,
Details: &NebulaMetaDetails{
-VpnIp: binary.BigEndian.Uint32(b[:]),
+VpnIp: uint32(lh.myVpnIp),
Ip4AndPorts: v4,
Ip6AndPorts: v6,
RelayVpnIp: relays,
@@ -895,12 +913,12 @@ func (lhh *LightHouseHandler) resetMeta() *NebulaMeta {
}

func lhHandleRequest(lhh *LightHouseHandler, f *Interface) udp.LightHouseHandlerFunc {
-return func(rAddr netip.AddrPort, vpnIp netip.Addr, p []byte) {
+return func(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte) {
lhh.HandleRequest(rAddr, vpnIp, p, f)
}
}

-func (lhh *LightHouseHandler) HandleRequest(rAddr netip.AddrPort, vpnIp netip.Addr, p []byte, w EncWriter) {
+func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w EncWriter) {
n := lhh.resetMeta()
err := n.Unmarshal(p)
if err != nil {
@@ -938,7 +956,7 @@ func (lhh *LightHouseHandler) HandleRequest(rAddr netip.AddrPort, vpnIp netip.Ad
}
}

-func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp netip.Addr, addr netip.AddrPort, w EncWriter) {
+func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w EncWriter) {
// Exit if we don't answer queries
if !lhh.lh.amLighthouse {
if lhh.l.Level >= logrus.DebugLevel {
@@ -949,14 +967,8 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp netip.Addr, a

//TODO: we can DRY this further
reqVpnIp := n.Details.VpnIp

-//TODO: IPV6-WORK
-b := [4]byte{}
-binary.BigEndian.PutUint32(b[:], n.Details.VpnIp)
-queryVpnIp := netip.AddrFrom4(b)

//TODO: Maybe instead of marshalling into n we marshal into a new `r` to not nuke our current request data
-found, ln, err := lhh.lh.queryAndPrepMessage(queryVpnIp, func(c *cache) (int, error) {
+found, ln, err := lhh.lh.queryAndPrepMessage(iputil.VpnIp(n.Details.VpnIp), func(c *cache) (int, error) {
n = lhh.resetMeta()
n.Type = NebulaMeta_HostQueryReply
n.Details.VpnIp = reqVpnIp
@@ -982,9 +994,8 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp netip.Addr, a
found, ln, err = lhh.lh.queryAndPrepMessage(vpnIp, func(c *cache) (int, error) {
n = lhh.resetMeta()
n.Type = NebulaMeta_HostPunchNotification
-//TODO: IPV6-WORK
-b = vpnIp.As4()
-n.Details.VpnIp = binary.BigEndian.Uint32(b[:])
+n.Details.VpnIp = uint32(vpnIp)
lhh.coalesceAnswers(c, n)

return n.MarshalTo(lhh.pb)
@@ -1000,11 +1011,7 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp netip.Addr, a
}

lhh.lh.metricTx(NebulaMeta_HostPunchNotification, 1)
-//TODO: IPV6-WORK
-binary.BigEndian.PutUint32(b[:], reqVpnIp)
-sendTo := netip.AddrFrom4(b)
-w.SendMessageToVpnIp(header.LightHouse, 0, sendTo, lhh.pb[:ln], lhh.nb, lhh.out[:0])
+w.SendMessageToVpnIp(header.LightHouse, 0, iputil.VpnIp(reqVpnIp), lhh.pb[:ln], lhh.nb, lhh.out[:0])
}

func (lhh *LightHouseHandler) coalesceAnswers(c *cache, n *NebulaMeta) {
@@ -1027,52 +1034,34 @@ func (lhh *LightHouseHandler) coalesceAnswers(c *cache, n *NebulaMeta) {
}

if c.relay != nil {
-//TODO: IPV6-WORK
-relays := make([]uint32, len(c.relay.relay))
-b := [4]byte{}
-for i, _ := range relays {
-b = c.relay.relay[i].As4()
-relays[i] = binary.BigEndian.Uint32(b[:])
-}
-n.Details.RelayVpnIp = append(n.Details.RelayVpnIp, relays...)
+n.Details.RelayVpnIp = append(n.Details.RelayVpnIp, c.relay.relay...)
}
}

-func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp netip.Addr) {
+func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp iputil.VpnIp) {
if !lhh.lh.IsLighthouseIP(vpnIp) {
return
}

lhh.lh.Lock()
-//TODO: IPV6-WORK
-b := [4]byte{}
-binary.BigEndian.PutUint32(b[:], n.Details.VpnIp)
-certVpnIp := netip.AddrFrom4(b)
-am := lhh.lh.unlockedGetRemoteList(certVpnIp)
+am := lhh.lh.unlockedGetRemoteList(iputil.VpnIp(n.Details.VpnIp))
am.Lock()
lhh.lh.Unlock()

-//TODO: IPV6-WORK
+certVpnIp := iputil.VpnIp(n.Details.VpnIp)
am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
-//TODO: IPV6-WORK
-relays := make([]netip.Addr, len(n.Details.RelayVpnIp))
-for i, _ := range n.Details.RelayVpnIp {
-binary.BigEndian.PutUint32(b[:], n.Details.RelayVpnIp[i])
-relays[i] = netip.AddrFrom4(b)
-}
-am.unlockedSetRelay(vpnIp, certVpnIp, relays)
+am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
am.Unlock()

// Non-blocking attempt to trigger, skip if it would block
select {
-case lhh.lh.handshakeTrigger <- certVpnIp:
+case lhh.lh.handshakeTrigger <- iputil.VpnIp(n.Details.VpnIp):
default:
}
}

-func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp netip.Addr, w EncWriter) {
+func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
if !lhh.lh.amLighthouse {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
@@ -1081,13 +1070,9 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp
}

//Simple check that the host sent this not someone else
-//TODO: IPV6-WORK
-b := [4]byte{}
-binary.BigEndian.PutUint32(b[:], n.Details.VpnIp)
-detailsVpnIp := netip.AddrFrom4(b)
-if detailsVpnIp != vpnIp {
+if n.Details.VpnIp != uint32(vpnIp) {
if lhh.l.Level >= logrus.DebugLevel {
-lhh.l.WithField("vpnIp", vpnIp).WithField("answer", detailsVpnIp).Debugln("Host sent invalid update")
+lhh.l.WithField("vpnIp", vpnIp).WithField("answer", iputil.VpnIp(n.Details.VpnIp)).Debugln("Host sent invalid update")
}
return
}
@@ -1097,24 +1082,15 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp
am.Lock()
lhh.lh.Unlock()

-am.unlockedSetV4(vpnIp, detailsVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
-am.unlockedSetV6(vpnIp, detailsVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
-//TODO: IPV6-WORK
-relays := make([]netip.Addr, len(n.Details.RelayVpnIp))
-for i, _ := range n.Details.RelayVpnIp {
-binary.BigEndian.PutUint32(b[:], n.Details.RelayVpnIp[i])
-relays[i] = netip.AddrFrom4(b)
-}
-am.unlockedSetRelay(vpnIp, detailsVpnIp, relays)
+certVpnIp := iputil.VpnIp(n.Details.VpnIp)
+am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
+am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
+am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
am.Unlock()

n = lhh.resetMeta()
n.Type = NebulaMeta_HostUpdateNotificationAck
-//TODO: IPV6-WORK
-vpnIpB := vpnIp.As4()
-n.Details.VpnIp = binary.BigEndian.Uint32(vpnIpB[:])
+n.Details.VpnIp = uint32(vpnIp)
ln, err := n.MarshalTo(lhh.pb)

if err != nil {
@@ -1126,14 +1102,14 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp
w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])
}

-func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp netip.Addr, w EncWriter) {
+func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
if !lhh.lh.IsLighthouseIP(vpnIp) {
return
}

empty := []byte{0}
-punch := func(vpnPeer netip.AddrPort) {
-if !vpnPeer.IsValid() {
+punch := func(vpnPeer *udp.Addr) {
+if vpnPeer == nil {
return
}

@@ -1145,29 +1121,23 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp n

if lhh.l.Level >= logrus.DebugLevel {
//TODO: lacking the ip we are actually punching on, old: l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
-//TODO: IPV6-WORK, make this debug line not suck
-b := [4]byte{}
-binary.BigEndian.PutUint32(b[:], n.Details.VpnIp)
-lhh.l.Debugf("Punching on %d for %v", vpnPeer.Port(), netip.AddrFrom4(b))
+lhh.l.Debugf("Punching on %d for %s", vpnPeer.Port, iputil.VpnIp(n.Details.VpnIp))
}
}

for _, a := range n.Details.Ip4AndPorts {
-punch(AddrPortFromIp4AndPort(a))
+punch(NewUDPAddrFromLH4(a))
}

for _, a := range n.Details.Ip6AndPorts {
-punch(AddrPortFromIp6AndPort(a))
+punch(NewUDPAddrFromLH6(a))
}

// This sends a nebula test packet to the host trying to contact us. In the case
// of a double nat or other difficult scenario, this may help establish
// a tunnel.
if lhh.lh.punchy.GetRespond() {
-//TODO: IPV6-WORK
-b := [4]byte{}
-binary.BigEndian.PutUint32(b[:], n.Details.VpnIp)
-queryVpnIp := netip.AddrFrom4(b)
+queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
go func() {
time.Sleep(lhh.lh.punchy.GetRespondDelay())
if lhh.l.Level >= logrus.DebugLevel {
@@ -1180,3 +1150,9 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp n
}()
}
}

+// ipMaskContains checks if testIp is contained by ip after applying a cidr
+// zeros is 32 - bits from net.IPMask.Size()
+func ipMaskContains(ip iputil.VpnIp, zeros iputil.VpnIp, testIp iputil.VpnIp) bool {
+return (testIp^ip)>>zeros == 0
+}
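Note: ipMaskContains above treats both addresses as big-endian uint32 values and compares everything above the host bits; zeros is the host-bit count, i.e. 32 minus the prefix length returned by net.IPMask.Size(). A small self-contained sketch of that relationship; the program and the binaryIP helper are illustrative assumptions, not code from the diff (binaryIP only mirrors what iputil.Ip2VpnIp does):

package main

import (
	"fmt"
	"net"
)

// binaryIP converts an IPv4 address to its big-endian uint32 form.
func binaryIP(ip net.IP) uint32 {
	v4 := ip.To4()
	return uint32(v4[0])<<24 | uint32(v4[1])<<16 | uint32(v4[2])<<8 | uint32(v4[3])
}

func main() {
	// zeros is 32 minus the prefix length, per the comment in the diff.
	_, ipNet, _ := net.ParseCIDR("10.128.0.0/24")
	ones, bits := ipNet.Mask.Size()
	zeros := uint32(bits - ones) // 8 for a /24

	ip := binaryIP(net.ParseIP("10.128.0.1"))
	testIp := binaryIP(net.ParseIP("10.128.0.200"))

	// Same containment test as ipMaskContains: equal once the host bits are shifted away.
	fmt.Println((testIp^ip)>>zeros == 0) // true
}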
lighthouse_test.go
@@ -2,14 +2,15 @@ package nebula
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/netip"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/header"
|
"github.com/slackhq/nebula/header"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
|
"github.com/slackhq/nebula/udp"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
@@ -22,17 +23,15 @@ func TestOldIPv4Only(t *testing.T) {
|
|||||||
var m Ip4AndPort
|
var m Ip4AndPort
|
||||||
err := m.Unmarshal(b)
|
err := m.Unmarshal(b)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
ip := netip.MustParseAddr("10.1.1.1")
|
assert.Equal(t, "10.1.1.1", iputil.VpnIp(m.GetIp()).String())
|
||||||
bp := ip.As4()
|
|
||||||
assert.Equal(t, binary.BigEndian.Uint32(bp[:]), m.GetIp())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewLhQuery(t *testing.T) {
|
func TestNewLhQuery(t *testing.T) {
|
||||||
myIp, err := netip.ParseAddr("192.1.1.1")
|
myIp := net.ParseIP("192.1.1.1")
|
||||||
assert.NoError(t, err)
|
myIpint := iputil.Ip2VpnIp(myIp)
|
||||||
|
|
||||||
// Generating a new lh query should work
|
// Generating a new lh query should work
|
||||||
a := NewLhQueryByInt(myIp)
|
a := NewLhQueryByInt(myIpint)
|
||||||
|
|
||||||
// The result should be a nebulameta protobuf
|
// The result should be a nebulameta protobuf
|
||||||
assert.IsType(t, &NebulaMeta{}, a)
|
assert.IsType(t, &NebulaMeta{}, a)
|
||||||
@@ -50,7 +49,7 @@ func TestNewLhQuery(t *testing.T) {
|
|||||||
|
|
||||||
func Test_lhStaticMapping(t *testing.T) {
|
func Test_lhStaticMapping(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
|
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
|
||||||
lh1 := "10.128.0.2"
|
lh1 := "10.128.0.2"
|
||||||
|
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
@@ -69,7 +68,7 @@ func Test_lhStaticMapping(t *testing.T) {
|
|||||||
|
|
||||||
func TestReloadLighthouseInterval(t *testing.T) {
|
func TestReloadLighthouseInterval(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
|
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
|
||||||
lh1 := "10.128.0.2"
|
lh1 := "10.128.0.2"
|
||||||
|
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
@@ -84,21 +83,21 @@ func TestReloadLighthouseInterval(t *testing.T) {
|
|||||||
lh.ifce = &mockEncWriter{}
|
lh.ifce = &mockEncWriter{}
|
||||||
|
|
||||||
// The first one routine is kicked off by main.go currently, lets make sure that one dies
|
// The first one routine is kicked off by main.go currently, lets make sure that one dies
|
||||||
assert.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 5"))
|
c.ReloadConfigString("lighthouse:\n interval: 5")
|
||||||
assert.Equal(t, int64(5), lh.interval.Load())
|
assert.Equal(t, int64(5), lh.interval.Load())
|
||||||
|
|
||||||
// Subsequent calls are killed off by the LightHouse.Reload function
|
// Subsequent calls are killed off by the LightHouse.Reload function
|
||||||
assert.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 10"))
|
c.ReloadConfigString("lighthouse:\n interval: 10")
|
||||||
assert.Equal(t, int64(10), lh.interval.Load())
|
assert.Equal(t, int64(10), lh.interval.Load())
|
||||||
|
|
||||||
// If this completes then nothing is stealing our reload routine
|
// If this completes then nothing is stealing our reload routine
|
||||||
assert.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 11"))
|
c.ReloadConfigString("lighthouse:\n interval: 11")
|
||||||
assert.Equal(t, int64(11), lh.interval.Load())
|
assert.Equal(t, int64(11), lh.interval.Load())
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
myVpnNet := netip.MustParsePrefix("10.128.0.1/0")
|
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
|
||||||
|
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
|
lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
|
||||||
@@ -106,33 +105,30 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
|||||||
b.Fatal()
|
b.Fatal()
|
||||||
}
|
}
|
||||||
|
|
||||||
hAddr := netip.MustParseAddrPort("4.5.6.7:12345")
|
hAddr := udp.NewAddrFromString("4.5.6.7:12345")
|
||||||
hAddr2 := netip.MustParseAddrPort("4.5.6.7:12346")
|
hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
|
||||||
|
lh.addrMap[3] = NewRemoteList(nil)
|
||||||
vpnIp3 := netip.MustParseAddr("0.0.0.3")
|
lh.addrMap[3].unlockedSetV4(
|
||||||
lh.addrMap[vpnIp3] = NewRemoteList(nil)
|
3,
|
||||||
lh.addrMap[vpnIp3].unlockedSetV4(
|
3,
|
||||||
vpnIp3,
|
|
||||||
vpnIp3,
|
|
||||||
[]*Ip4AndPort{
|
[]*Ip4AndPort{
|
||||||
NewIp4AndPortFromNetIP(hAddr.Addr(), hAddr.Port()),
|
NewIp4AndPort(hAddr.IP, uint32(hAddr.Port)),
|
||||||
NewIp4AndPortFromNetIP(hAddr2.Addr(), hAddr2.Port()),
|
NewIp4AndPort(hAddr2.IP, uint32(hAddr2.Port)),
|
||||||
},
|
},
|
||||||
func(netip.Addr, *Ip4AndPort) bool { return true },
|
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
|
||||||
)
|
)
|
||||||
|
|
||||||
rAddr := netip.MustParseAddrPort("1.2.2.3:12345")
|
rAddr := udp.NewAddrFromString("1.2.2.3:12345")
|
||||||
rAddr2 := netip.MustParseAddrPort("1.2.2.3:12346")
|
rAddr2 := udp.NewAddrFromString("1.2.2.3:12346")
|
||||||
vpnIp2 := netip.MustParseAddr("0.0.0.3")
|
lh.addrMap[2] = NewRemoteList(nil)
|
||||||
lh.addrMap[vpnIp2] = NewRemoteList(nil)
|
lh.addrMap[2].unlockedSetV4(
|
||||||
lh.addrMap[vpnIp2].unlockedSetV4(
|
3,
|
||||||
vpnIp3,
|
3,
|
||||||
vpnIp3,
|
|
||||||
[]*Ip4AndPort{
|
[]*Ip4AndPort{
|
||||||
NewIp4AndPortFromNetIP(rAddr.Addr(), rAddr.Port()),
|
NewIp4AndPort(rAddr.IP, uint32(rAddr.Port)),
|
||||||
NewIp4AndPortFromNetIP(rAddr2.Addr(), rAddr2.Port()),
|
NewIp4AndPort(rAddr2.IP, uint32(rAddr2.Port)),
|
||||||
},
|
},
|
||||||
func(netip.Addr, *Ip4AndPort) bool { return true },
|
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
|
||||||
)
|
)
|
||||||
|
|
||||||
mw := &mockEncWriter{}
|
mw := &mockEncWriter{}
|
||||||
@@ -149,7 +145,7 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
|||||||
p, err := req.Marshal()
|
p, err := req.Marshal()
|
||||||
assert.NoError(b, err)
|
assert.NoError(b, err)
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
lhh.HandleRequest(rAddr, vpnIp2, p, mw)
|
lhh.HandleRequest(rAddr, 2, p, mw)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
b.Run("found", func(b *testing.B) {
|
b.Run("found", func(b *testing.B) {
|
||||||
@@ -165,7 +161,7 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
|||||||
assert.NoError(b, err)
|
assert.NoError(b, err)
|
||||||
|
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
lhh.HandleRequest(rAddr, vpnIp2, p, mw)
|
lhh.HandleRequest(rAddr, 2, p, mw)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -173,51 +169,51 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
|
|||||||
func TestLighthouse_Memory(t *testing.T) {
|
func TestLighthouse_Memory(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
|
|
||||||
myUdpAddr0 := netip.MustParseAddrPort("10.0.0.2:4242")
|
myUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.2"), Port: 4242}
|
||||||
myUdpAddr1 := netip.MustParseAddrPort("192.168.0.2:4242")
|
myUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4242}
|
||||||
myUdpAddr2 := netip.MustParseAddrPort("172.16.0.2:4242")
|
myUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.2"), Port: 4242}
|
||||||
myUdpAddr3 := netip.MustParseAddrPort("100.152.0.2:4242")
|
myUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.2"), Port: 4242}
|
||||||
myUdpAddr4 := netip.MustParseAddrPort("24.15.0.2:4242")
|
myUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.2"), Port: 4242}
|
||||||
myUdpAddr5 := netip.MustParseAddrPort("192.168.0.2:4243")
|
myUdpAddr5 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4243}
|
||||||
myUdpAddr6 := netip.MustParseAddrPort("192.168.0.2:4244")
|
myUdpAddr6 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4244}
|
||||||
myUdpAddr7 := netip.MustParseAddrPort("192.168.0.2:4245")
|
myUdpAddr7 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4245}
|
||||||
myUdpAddr8 := netip.MustParseAddrPort("192.168.0.2:4246")
|
myUdpAddr8 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4246}
|
||||||
myUdpAddr9 := netip.MustParseAddrPort("192.168.0.2:4247")
|
myUdpAddr9 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4247}
|
||||||
myUdpAddr10 := netip.MustParseAddrPort("192.168.0.2:4248")
|
myUdpAddr10 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4248}
|
||||||
myUdpAddr11 := netip.MustParseAddrPort("192.168.0.2:4249")
|
myUdpAddr11 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4249}
|
||||||
myVpnIp := netip.MustParseAddr("10.128.0.2")
|
myVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.2"))
|
||||||
|
|
||||||
theirUdpAddr0 := netip.MustParseAddrPort("10.0.0.3:4242")
|
theirUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.3"), Port: 4242}
|
||||||
theirUdpAddr1 := netip.MustParseAddrPort("192.168.0.3:4242")
|
theirUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.3"), Port: 4242}
|
||||||
theirUdpAddr2 := netip.MustParseAddrPort("172.16.0.3:4242")
|
theirUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.3"), Port: 4242}
|
||||||
theirUdpAddr3 := netip.MustParseAddrPort("100.152.0.3:4242")
|
theirUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.3"), Port: 4242}
|
||||||
theirUdpAddr4 := netip.MustParseAddrPort("24.15.0.3:4242")
|
theirUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.3"), Port: 4242}
|
||||||
theirVpnIp := netip.MustParseAddr("10.128.0.3")
|
theirVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.3"))
|
||||||
|
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
||||||
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
||||||
lh, err := NewLightHouseFromConfig(context.Background(), l, c, netip.MustParsePrefix("10.128.0.1/24"), nil, nil)
|
lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
lhh := lh.NewRequestHandler()
|
lhh := lh.NewRequestHandler()
|
||||||
|
|
||||||
// Test that my first update responds with just that
|
// Test that my first update responds with just that
|
||||||
newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{myUdpAddr1, myUdpAddr2}, lhh)
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr2}, lhh)
|
||||||
r := newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
r := newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2)
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2)
|
||||||
|
|
||||||
// Ensure we don't accumulate addresses
|
// Ensure we don't accumulate addresses
|
||||||
newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{myUdpAddr3}, lhh)
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr3}, lhh)
|
||||||
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr3)
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr3)
|
||||||
|
|
||||||
// Grow it back to 2
|
// Grow it back to 2
|
||||||
newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{myUdpAddr1, myUdpAddr4}, lhh)
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr4}, lhh)
|
||||||
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
|
||||||
|
|
||||||
// Update a different host and ask about it
|
// Update a different host and ask about it
|
||||||
newLHHostUpdate(theirUdpAddr0, theirVpnIp, []netip.AddrPort{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh)
|
newLHHostUpdate(theirUdpAddr0, theirVpnIp, []*udp.Addr{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh)
|
||||||
r = newLHHostRequest(theirUdpAddr0, theirVpnIp, theirVpnIp, lhh)
|
r = newLHHostRequest(theirUdpAddr0, theirVpnIp, theirVpnIp, lhh)
|
||||||
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
|
||||||
|
|
||||||
@@ -237,7 +233,7 @@ func TestLighthouse_Memory(t *testing.T) {
|
|||||||
newLHHostUpdate(
|
newLHHostUpdate(
|
||||||
myUdpAddr0,
|
myUdpAddr0,
|
||||||
myVpnIp,
|
myVpnIp,
|
||||||
[]netip.AddrPort{
|
[]*udp.Addr{
|
||||||
myUdpAddr1,
|
myUdpAddr1,
|
||||||
myUdpAddr2,
|
myUdpAddr2,
|
||||||
myUdpAddr3,
|
myUdpAddr3,
|
||||||
@@ -260,10 +256,10 @@ func TestLighthouse_Memory(t *testing.T) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Make sure we won't add ips in our vpn network
|
// Make sure we won't add ips in our vpn network
|
||||||
bad1 := netip.MustParseAddrPort("10.128.0.99:4242")
|
bad1 := &udp.Addr{IP: net.ParseIP("10.128.0.99"), Port: 4242}
|
||||||
bad2 := netip.MustParseAddrPort("10.128.0.100:4242")
|
bad2 := &udp.Addr{IP: net.ParseIP("10.128.0.100"), Port: 4242}
|
||||||
good := netip.MustParseAddrPort("1.128.0.99:4242")
|
good := &udp.Addr{IP: net.ParseIP("1.128.0.99"), Port: 4242}
|
||||||
newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{bad1, bad2, good}, lhh)
|
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{bad1, bad2, good}, lhh)
|
||||||
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
|
||||||
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good)
|
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good)
|
||||||
}
|
}
|
||||||
@@ -273,7 +269,7 @@ func TestLighthouse_reload(t *testing.T) {
|
|||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
|
||||||
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
|
||||||
lh, err := NewLightHouseFromConfig(context.Background(), l, c, netip.MustParsePrefix("10.128.0.1/24"), nil, nil)
|
lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
nc := map[interface{}]interface{}{
|
nc := map[interface{}]interface{}{
|
||||||
@@ -289,13 +285,11 @@ func TestLighthouse_reload(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLHHostRequest(fromAddr netip.AddrPort, myVpnIp, queryVpnIp netip.Addr, lhh *LightHouseHandler) testLhReply {
|
func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
|
||||||
//TODO: IPV6-WORK
|
|
||||||
bip := queryVpnIp.As4()
|
|
||||||
req := &NebulaMeta{
|
req := &NebulaMeta{
|
||||||
Type: NebulaMeta_HostQuery,
|
Type: NebulaMeta_HostQuery,
|
||||||
Details: &NebulaMetaDetails{
|
Details: &NebulaMetaDetails{
|
||||||
VpnIp: binary.BigEndian.Uint32(bip[:]),
|
VpnIp: uint32(queryVpnIp),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -312,19 +306,17 @@ func newLHHostRequest(fromAddr netip.AddrPort, myVpnIp, queryVpnIp netip.Addr, l
|
|||||||
return w.lastReply
|
return w.lastReply
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLHHostUpdate(fromAddr netip.AddrPort, vpnIp netip.Addr, addrs []netip.AddrPort, lhh *LightHouseHandler) {
|
func newLHHostUpdate(fromAddr *udp.Addr, vpnIp iputil.VpnIp, addrs []*udp.Addr, lhh *LightHouseHandler) {
|
||||||
//TODO: IPV6-WORK
|
|
||||||
bip := vpnIp.As4()
|
|
||||||
req := &NebulaMeta{
|
req := &NebulaMeta{
|
||||||
Type: NebulaMeta_HostUpdateNotification,
|
Type: NebulaMeta_HostUpdateNotification,
|
||||||
Details: &NebulaMetaDetails{
|
Details: &NebulaMetaDetails{
|
||||||
VpnIp: binary.BigEndian.Uint32(bip[:]),
|
VpnIp: uint32(vpnIp),
|
||||||
Ip4AndPorts: make([]*Ip4AndPort, len(addrs)),
|
Ip4AndPorts: make([]*Ip4AndPort, len(addrs)),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range addrs {
|
for k, v := range addrs {
|
||||||
req.Details.Ip4AndPorts[k] = NewIp4AndPortFromNetIP(v.Addr(), v.Port())
|
req.Details.Ip4AndPorts[k] = &Ip4AndPort{Ip: uint32(iputil.Ip2VpnIp(v.IP)), Port: uint32(v.Port)}
|
||||||
}
|
}
|
||||||
|
|
||||||
b, err := req.Marshal()
|
b, err := req.Marshal()
|
||||||
@@ -402,10 +394,16 @@ func newLHHostUpdate(fromAddr netip.AddrPort, vpnIp netip.Addr, addrs []netip.Ad
    // )
    //}

+ func Test_ipMaskContains(t *testing.T) {
+     assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.0.255"))))
+     assert.False(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1"))))
+     assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1"))))
+ }
+
type testLhReply struct {
    nebType    header.MessageType
    nebSubType header.MessageSubType
-     vpnIp      netip.Addr
+     vpnIp      iputil.VpnIp
    msg        *NebulaMeta
}

@@ -416,7 +414,7 @@ type testEncWriter struct {

func (tw *testEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
}
- func (tw *testEncWriter) Handshake(vpnIp netip.Addr) {
+ func (tw *testEncWriter) Handshake(vpnIp iputil.VpnIp) {
}

func (tw *testEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, _, _ []byte) {
@@ -436,7 +434,7 @@ func (tw *testEncWriter) SendMessageToHostInfo(t header.MessageType, st header.M
    }
}

- func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, _, _ []byte) {
+ func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
    msg := &NebulaMeta{}
    err := msg.Unmarshal(p)
    if tw.metaFilter == nil || msg.Type == *tw.metaFilter {
@@ -454,16 +452,35 @@ func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.Mess
    }
}

// assertIp4InArray asserts every address in want is at the same position in have and that the lengths match
- func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...netip.AddrPort) {
+ func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...*udp.Addr) {
    if !assert.Len(t, have, len(want)) {
        return
    }

    for k, w := range want {
-         //TODO: IPV6-WORK
-         h := AddrPortFromIp4AndPort(have[k])
-         if !(h == w) {
-             assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v, found %v", w, k, h))
+         if !(have[k].Ip == uint32(iputil.Ip2VpnIp(w.IP)) && have[k].Port == uint32(w.Port)) {
+             assert.Fail(t, fmt.Sprintf("Response did not contain: %v:%v at %v; %v", w.IP, w.Port, k, translateV4toUdpAddr(have)))
        }
    }
}

+ // assertUdpAddrInArray asserts every address in want is at the same position in have and that the lengths match
+ func assertUdpAddrInArray(t *testing.T, have []*udp.Addr, want ...*udp.Addr) {
+     if !assert.Len(t, have, len(want)) {
+         return
+     }
+
+     for k, w := range want {
+         if !(have[k].IP.Equal(w.IP) && have[k].Port == w.Port) {
+             assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v; %v", w, k, have))
+         }
+     }
+ }
+
+ func translateV4toUdpAddr(ips []*Ip4AndPort) []*udp.Addr {
+     addrs := make([]*udp.Addr, len(ips))
+     for k, v := range ips {
+         addrs[k] = NewUDPAddrFromLH4(v)
+     }
+     return addrs
+ }
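Test_ipMaskContains above pins down the containment semantics the prometheus-side code relies on in outside.go. A minimal sketch of an equivalent check, written against plain uint32 values rather than iputil.VpnIp and not the repository's implementation, behaves the same way on the three asserted cases.

package main

import "fmt"

// maskContains reports whether addr falls inside ip's network once the low
// `zeros` bits are masked off (zeros == 32-24 means a /24, zeros == 32 means
// "match everything"). Go defines shifts of a uint32 by 32 or more as zero,
// so the third test case still returns true.
func maskContains(ip uint32, zeros uint32, addr uint32) bool {
    mask := ^uint32(0) << zeros
    return ip&mask == addr&mask
}

func main() {
    ip := uint32(10<<24 | 1) // 10.0.0.1
    fmt.Println(maskContains(ip, 32-24, 10<<24|255))    // true: 10.0.0.255 is in the same /24
    fmt.Println(maskContains(ip, 32-24, 10<<24|1<<8|1)) // false: 10.0.1.1 is outside the /24
    fmt.Println(maskContains(ip, 32, 10<<24|1<<8|1))    // true: zero-bit mask matches anything
}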
main.go (125 changed lines)
@@ -5,7 +5,6 @@ import (
    "encoding/binary"
    "fmt"
    "net"
-     "net/netip"
    "time"

    "github.com/sirupsen/logrus"
@@ -68,17 +67,8 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
    }
    l.WithField("firewallHashes", fw.GetRuleHashes()).Info("Firewall started")

-     ones, _ := certificate.Details.Ips[0].Mask.Size()
-     addr, ok := netip.AddrFromSlice(certificate.Details.Ips[0].IP)
-     if !ok {
-         err = util.NewContextualError(
-             "Invalid ip address in certificate",
-             m{"vpnIp": certificate.Details.Ips[0].IP},
-             nil,
-         )
-         return nil, err
-     }
-     tunCidr := netip.PrefixFrom(addr, ones)
+     // TODO: make sure mask is 4 bytes
+     tunCidr := certificate.Details.Ips[0]

    ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
    if err != nil {
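For reference, the eleven lines removed above collapse into a small conversion from the certificate's *net.IPNet to a netip.Prefix. A hedged, standalone version of that conversion, with a plain error instead of util.NewContextualError, could look like this:

package main

import (
    "fmt"
    "net"
    "net/netip"
)

// prefixFromIPNet mirrors what the removed (netip) side does with the first
// certificate network. This is an illustrative sketch, not the repository's code.
func prefixFromIPNet(n *net.IPNet) (netip.Prefix, error) {
    ones, _ := n.Mask.Size()
    addr, ok := netip.AddrFromSlice(n.IP)
    if !ok {
        return netip.Prefix{}, fmt.Errorf("invalid ip address in certificate: %v", n.IP)
    }
    // A 4-byte net.IP maps to an IPv4 netip.Addr; a 16-byte IPv4-mapped slice
    // needs Unmap before building the prefix.
    return netip.PrefixFrom(addr.Unmap(), ones), nil
}

func main() {
    _, ipNet, _ := net.ParseCIDR("10.128.0.1/24")
    p, err := prefixFromIPNet(ipNet)
    fmt.Println(p, err) // 10.128.0.0/24 <nil>
}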
@@ -160,25 +150,21 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

    if !configTest {
        rawListenHost := c.GetString("listen.host", "0.0.0.0")
-         var listenHost netip.Addr
+         var listenHost *net.IPAddr
        if rawListenHost == "[::]" {
            // Old guidance was to provide the literal `[::]` in `listen.host` but that won't resolve.
-             listenHost = netip.IPv6Unspecified()
+             listenHost = &net.IPAddr{IP: net.IPv6zero}

        } else {
-             ips, err := net.DefaultResolver.LookupNetIP(context.Background(), "ip", rawListenHost)
+             listenHost, err = net.ResolveIPAddr("ip", rawListenHost)
            if err != nil {
                return nil, util.ContextualizeIfNeeded("Failed to resolve listen.host", err)
            }
-             if len(ips) == 0 {
-                 return nil, util.ContextualizeIfNeeded("Failed to resolve listen.host", err)
-             }
-             listenHost = ips[0].Unmap()
        }

        for i := 0; i < routines; i++ {
-             l.Infof("listening on %v", netip.AddrPortFrom(listenHost, uint16(port)))
-             udpServer, err := udp.NewListener(l, listenHost, port, routines > 1, c.GetInt("listen.batch", 64))
+             l.Infof("listening %q %d", listenHost.IP, port)
+             udpServer, err := udp.NewListener(l, listenHost.IP, port, routines > 1, c.GetInt("listen.batch", 64))
            if err != nil {
                return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
            }
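The removed side of this hunk resolves listen.host through the netip-aware resolver, while the added side returns to net.ResolveIPAddr. A compact sketch of the netip flavor with the same `[::]` special case, assuming the surrounding error wrapping is out of scope:

package main

import (
    "context"
    "fmt"
    "net"
    "net/netip"
)

// resolveListenHost mirrors the removed logic above: "[::]" is special-cased
// because the resolver will not resolve the bracketed literal; everything else
// goes through LookupNetIP and the first result is unmapped.
func resolveListenHost(raw string) (netip.Addr, error) {
    if raw == "[::]" {
        return netip.IPv6Unspecified(), nil
    }
    ips, err := net.DefaultResolver.LookupNetIP(context.Background(), "ip", raw)
    if err != nil {
        return netip.Addr{}, err
    }
    if len(ips) == 0 {
        return netip.Addr{}, fmt.Errorf("no addresses for %q", raw)
    }
    return ips[0].Unmap(), nil
}

func main() {
    addr, err := resolveListenHost("0.0.0.0")
    fmt.Println(addr, err) // 0.0.0.0 <nil>
}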
@@ -192,14 +178,58 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
            if err != nil {
                return nil, util.NewContextualError("Failed to get listening port", nil, err)
            }
-             port = int(uPort.Port())
+             port = int(uPort.Port)
        }
    }
}

-     hostMap := NewHostMapFromConfig(l, tunCidr, c)
+     // Set up my internal host map
+     var preferredRanges []*net.IPNet
+     rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
+     // First, check if 'preferred_ranges' is set and fallback to 'local_range'
+     if len(rawPreferredRanges) > 0 {
+         for _, rawPreferredRange := range rawPreferredRanges {
+             _, preferredRange, err := net.ParseCIDR(rawPreferredRange)
+             if err != nil {
+                 return nil, util.ContextualizeIfNeeded("Failed to parse preferred ranges", err)
+             }
+             preferredRanges = append(preferredRanges, preferredRange)
+         }
+     }
+
+     // local_range was superseded by preferred_ranges. If it is still present,
+     // merge the local_range setting into preferred_ranges. We will probably
+     // deprecate local_range and remove in the future.
+     rawLocalRange := c.GetString("local_range", "")
+     if rawLocalRange != "" {
+         _, localRange, err := net.ParseCIDR(rawLocalRange)
+         if err != nil {
+             return nil, util.ContextualizeIfNeeded("Failed to parse local_range", err)
+         }
+
+         // Check if the entry for local_range was already specified in
+         // preferred_ranges. Don't put it into the slice twice if so.
+         var found bool
+         for _, r := range preferredRanges {
+             if r.String() == localRange.String() {
+                 found = true
+                 break
+             }
+         }
+         if !found {
+             preferredRanges = append(preferredRanges, localRange)
+         }
+     }
+
+     hostMap := NewHostMap(l, tunCidr, preferredRanges)
+     hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)
+
+     l.
+         WithField("network", hostMap.vpnCIDR.String()).
+         WithField("preferredRanges", hostMap.preferredRanges).
+         Info("Main HostMap created")

    punchy := NewPunchyFromConfig(l, c)
-     connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
    lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
    if err != nil {
        return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
@@ -216,7 +246,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

    handshakeConfig := HandshakeConfig{
        tryInterval:   c.GetDuration("handshakes.try_interval", DefaultHandshakeTryInterval),
-         retries:       int64(c.GetInt("handshakes.retries", DefaultHandshakeRetries)),
+         retries:       c.GetInt("handshakes.retries", DefaultHandshakeRetries),
        triggerBuffer: c.GetInt("handshakes.trigger_buffer", DefaultHandshakeTriggerBuffer),
        useRelays:     useRelays,

@@ -235,27 +265,31 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
checkInterval := c.GetInt("timers.connection_alive_interval", 5)
|
||||||
|
pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
|
||||||
|
|
||||||
ifConfig := &InterfaceConfig{
|
ifConfig := &InterfaceConfig{
|
||||||
HostMap: hostMap,
|
HostMap: hostMap,
|
||||||
Inside: tun,
|
Inside: tun,
|
||||||
Outside: udpConns[0],
|
Outside: udpConns[0],
|
||||||
pki: pki,
|
pki: pki,
|
||||||
Cipher: c.GetString("cipher", "aes"),
|
Cipher: c.GetString("cipher", "aes"),
|
||||||
Firewall: fw,
|
Firewall: fw,
|
||||||
ServeDns: serveDns,
|
ServeDns: serveDns,
|
||||||
HandshakeManager: handshakeManager,
|
HandshakeManager: handshakeManager,
|
||||||
connectionManager: connManager,
|
lightHouse: lightHouse,
|
||||||
lightHouse: lightHouse,
|
checkInterval: time.Second * time.Duration(checkInterval),
|
||||||
tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
|
pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
|
||||||
reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
|
tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
|
||||||
reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
|
reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
|
||||||
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
|
reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
|
||||||
DropMulticast: c.GetBool("tun.drop_multicast", false),
|
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
|
||||||
routines: routines,
|
DropMulticast: c.GetBool("tun.drop_multicast", false),
|
||||||
MessageMetrics: messageMetrics,
|
routines: routines,
|
||||||
version: buildVersion,
|
MessageMetrics: messageMetrics,
|
||||||
relayManager: NewRelayManager(ctx, l, hostMap, c),
|
version: buildVersion,
|
||||||
punchy: punchy,
|
relayManager: NewRelayManager(ctx, l, hostMap, c),
|
||||||
|
punchy: punchy,
|
||||||
|
|
||||||
ConntrackCacheTimeout: conntrackCacheTimeout,
|
ConntrackCacheTimeout: conntrackCacheTimeout,
|
||||||
l: l,
|
l: l,
|
||||||
@@ -322,6 +356,5 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
        statsStart,
        dnsStart,
        lightHouse.StartUpdateWorker,
-         connManager.Start,
    }, nil
}
noise.go (10 changed lines)
@@ -28,11 +28,11 @@ func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState {
// EncryptDanger encrypts and authenticates a given payload.
//
// out is a destination slice to hold the output of the EncryptDanger operation.
// - ad is additional data, which will be authenticated and appended to out, but not encrypted.
// - plaintext is encrypted, authenticated and appended to out.
// - n is a nonce value which must never be re-used with this key.
// - nb is a buffer used for temporary storage in the implementation of this call, which should
// be re-used by callers to minimize garbage collection.
func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
    if s != nil {
        // TODO: Is this okay now that we have made messageCounter atomic?

@@ -48,7 +48,7 @@ func (c nistCurve) DH(privkey, pubkey []byte) ([]byte, error) {
    }
    ecdhPrivKey, err := c.curve.NewPrivateKey(privkey)
    if err != nil {
-         return nil, fmt.Errorf("unable to unmarshal private key: %w", err)
+         return nil, fmt.Errorf("unable to unmarshal pubkey: %w", err)
    }

    return ecdhPrivKey.ECDH(ecdhPubKey)
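The second noise.go hunk only changes an error string inside nistCurve.DH; the surrounding call sequence is the standard crypto/ecdh exchange. A runnable sketch of that exchange follows (keys are generated on the spot here, whereas the real code unmarshals raw key bytes with NewPrivateKey/NewPublicKey and wraps the errors as the hunk shows):

package main

import (
    "bytes"
    "crypto/ecdh"
    "crypto/rand"
    "fmt"
)

func main() {
    curve := ecdh.P256()

    alice, err := curve.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }
    bob, err := curve.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }

    // Each side combines its own private key with the peer's public key.
    aliceShared, err := alice.ECDH(bob.PublicKey())
    if err != nil {
        panic(err)
    }
    bobShared, err := bob.ECDH(alice.PublicKey())
    if err != nil {
        panic(err)
    }

    fmt.Println(bytes.Equal(aliceShared, bobShared)) // true: both sides derive the same secret
}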
outside.go (121 changed lines)
@@ -4,7 +4,6 @@ import (
    "encoding/binary"
    "errors"
    "fmt"
-     "net/netip"
    "time"

    "github.com/flynn/noise"
@@ -12,6 +11,7 @@ import (
    "github.com/slackhq/nebula/cert"
    "github.com/slackhq/nebula/firewall"
    "github.com/slackhq/nebula/header"
+     "github.com/slackhq/nebula/iputil"
    "github.com/slackhq/nebula/udp"
    "golang.org/x/net/ipv4"
    "google.golang.org/protobuf/proto"
@@ -21,10 +21,9 @@ const (
|
|||||||
minFwPacketLen = 4
|
minFwPacketLen = 4
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: IPV6-WORK this can likely be removed now
|
|
||||||
func readOutsidePackets(f *Interface) udp.EncReader {
|
func readOutsidePackets(f *Interface) udp.EncReader {
|
||||||
return func(
|
return func(
|
||||||
addr netip.AddrPort,
|
addr *udp.Addr,
|
||||||
out []byte,
|
out []byte,
|
||||||
packet []byte,
|
packet []byte,
|
||||||
header *header.H,
|
header *header.H,
|
||||||
@@ -38,25 +37,27 @@ func readOutsidePackets(f *Interface) udp.EncReader {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
|
func (f *Interface) readOutsidePackets(addr *udp.Addr, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
|
||||||
err := h.Parse(packet)
|
err := h.Parse(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO: best if we return this and let caller log
|
// TODO: best if we return this and let caller log
|
||||||
// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
|
// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
|
||||||
// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
|
// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
|
||||||
if len(packet) > 1 {
|
if len(packet) > 1 {
|
||||||
f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", ip, err)
|
f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", addr, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
//l.Error("in packet ", header, packet[HeaderLen:])
|
//l.Error("in packet ", header, packet[HeaderLen:])
|
||||||
if ip.IsValid() {
|
if addr != nil {
|
||||||
if f.myVpnNet.Contains(ip.Addr()) {
|
if ip4 := addr.IP.To4(); ip4 != nil {
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, iputil.VpnIp(binary.BigEndian.Uint32(ip4))) {
|
||||||
f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet")
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithField("udpAddr", addr).Debug("Refusing to process double encrypted packet")
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -76,7 +77,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
switch h.Type {
|
switch h.Type {
|
||||||
case header.Message:
|
case header.Message:
|
||||||
// TODO handleEncrypted sends directly to addr on error. Handle this in the tunneling case.
|
// TODO handleEncrypted sends directly to addr on error. Handle this in the tunneling case.
|
||||||
if !f.handleEncrypted(ci, ip, h) {
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -100,9 +101,9 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
// Successfully validated the thing. Get rid of the Relay header.
|
// Successfully validated the thing. Get rid of the Relay header.
|
||||||
signedPayload = signedPayload[header.Len:]
|
signedPayload = signedPayload[header.Len:]
|
||||||
// Pull the Roaming parts up here, and return in all call paths.
|
// Pull the Roaming parts up here, and return in all call paths.
|
||||||
f.handleHostRoaming(hostinfo, ip)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
// Track usage of both the HostInfo and the Relay for the received & authenticated packet
|
// Track usage of both the HostInfo and the Relay for the received & authenticated packet
|
||||||
f.connectionManager.In(hostinfo)
|
f.connectionManager.In(hostinfo.localIndexId)
|
||||||
f.connectionManager.RelayUsed(h.RemoteIndex)
|
f.connectionManager.RelayUsed(h.RemoteIndex)
|
||||||
|
|
||||||
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
|
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
|
||||||
@@ -117,7 +118,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
case TerminalType:
|
case TerminalType:
|
||||||
// If I am the target of this relay, process the unwrapped packet
|
// If I am the target of this relay, process the unwrapped packet
|
||||||
// From this recursive point, all these variables are 'burned'. We shouldn't rely on them again.
|
// From this recursive point, all these variables are 'burned'. We shouldn't rely on them again.
|
||||||
f.readOutsidePackets(netip.AddrPort{}, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache)
|
f.readOutsidePackets(nil, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache)
|
||||||
return
|
return
|
||||||
case ForwardingType:
|
case ForwardingType:
|
||||||
// Find the target HostInfo relay object
|
// Find the target HostInfo relay object
|
||||||
@@ -147,13 +148,13 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
|
|
||||||
case header.LightHouse:
|
case header.LightHouse:
|
||||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
if !f.handleEncrypted(ci, ip, h) {
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
|
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
|
||||||
WithField("packet", packet).
|
WithField("packet", packet).
|
||||||
Error("Failed to decrypt lighthouse packet")
|
Error("Failed to decrypt lighthouse packet")
|
||||||
|
|
||||||
@@ -162,19 +163,19 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lhf(ip, hostinfo.vpnIp, d)
|
lhf(addr, hostinfo.vpnIp, d)
|
||||||
|
|
||||||
// Fallthrough to the bottom to record incoming traffic
|
// Fallthrough to the bottom to record incoming traffic
|
||||||
|
|
||||||
case header.Test:
|
case header.Test:
|
||||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
if !f.handleEncrypted(ci, ip, h) {
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
|
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
|
||||||
WithField("packet", packet).
|
WithField("packet", packet).
|
||||||
Error("Failed to decrypt test packet")
|
Error("Failed to decrypt test packet")
|
||||||
|
|
||||||
@@ -186,7 +187,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
if h.Subtype == header.TestRequest {
|
if h.Subtype == header.TestRequest {
|
||||||
// This testRequest might be from TryPromoteBest, so we should roam
|
// This testRequest might be from TryPromoteBest, so we should roam
|
||||||
// to the new IP address before responding
|
// to the new IP address before responding
|
||||||
f.handleHostRoaming(hostinfo, ip)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
f.send(header.Test, header.TestReply, ci, hostinfo, d, nb, out)
|
f.send(header.Test, header.TestReply, ci, hostinfo, d, nb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -197,34 +198,34 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
|
|
||||||
case header.Handshake:
|
case header.Handshake:
|
||||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
f.handshakeManager.HandleIncoming(ip, via, packet, h)
|
f.handshakeManager.HandleIncoming(addr, via, packet, h)
|
||||||
return
|
return
|
||||||
|
|
||||||
case header.RecvError:
|
case header.RecvError:
|
||||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
f.handleRecvError(ip, h)
|
f.handleRecvError(addr, h)
|
||||||
return
|
return
|
||||||
|
|
||||||
case header.CloseTunnel:
|
case header.CloseTunnel:
|
||||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
if !f.handleEncrypted(ci, ip, h) {
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.logger(f.l).WithField("udpAddr", ip).
|
hostinfo.logger(f.l).WithField("udpAddr", addr).
|
||||||
Info("Close tunnel received, tearing down.")
|
Info("Close tunnel received, tearing down.")
|
||||||
|
|
||||||
f.closeTunnel(hostinfo)
|
f.closeTunnel(hostinfo)
|
||||||
return
|
return
|
||||||
|
|
||||||
case header.Control:
|
case header.Control:
|
||||||
if !f.handleEncrypted(ci, ip, h) {
|
if !f.handleEncrypted(ci, addr, h) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
|
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
|
||||||
WithField("packet", packet).
|
WithField("packet", packet).
|
||||||
Error("Failed to decrypt Control packet")
|
Error("Failed to decrypt Control packet")
|
||||||
return
|
return
|
||||||
@@ -240,13 +241,13 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
|
|||||||
|
|
||||||
default:
|
default:
|
||||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||||
hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", ip)
|
hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", addr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.handleHostRoaming(hostinfo, ip)
|
f.handleHostRoaming(hostinfo, addr)
|
||||||
|
|
||||||
f.connectionManager.In(hostinfo)
|
f.connectionManager.In(hostinfo.localIndexId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
|
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
|
||||||
@@ -263,41 +264,39 @@ func (f *Interface) sendCloseTunnel(h *HostInfo) {
|
|||||||
f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
|
f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, ip netip.AddrPort) {
|
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udp.Addr) {
|
||||||
if ip.IsValid() && hostinfo.remote != ip {
|
if addr != nil && !hostinfo.remote.Equals(addr) {
|
||||||
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, ip.Addr()) {
|
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
|
||||||
hostinfo.logger(f.l).WithField("newAddr", ip).Debug("lighthouse.remote_allow_list denied roaming")
|
hostinfo.logger(f.l).WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !hostinfo.lastRoam.IsZero() && ip == hostinfo.lastRoamRemote && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
|
if !hostinfo.lastRoam.IsZero() && addr.Equals(hostinfo.lastRoamRemote) && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", ip).
|
hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
|
||||||
Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds)
|
Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", ip).
|
hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
|
||||||
Info("Host roamed to new udp ip/port.")
|
Info("Host roamed to new udp ip/port.")
|
||||||
hostinfo.lastRoam = time.Now()
|
hostinfo.lastRoam = time.Now()
|
||||||
hostinfo.lastRoamRemote = hostinfo.remote
|
hostinfo.lastRoamRemote = hostinfo.remote
|
||||||
hostinfo.SetRemote(ip)
|
hostinfo.SetRemote(addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleEncrypted returns true if a packet should be processed, false otherwise
|
func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udp.Addr, h *header.H) bool {
|
||||||
func (f *Interface) handleEncrypted(ci *ConnectionState, addr netip.AddrPort, h *header.H) bool {
|
// If connectionstate exists and the replay protector allows, process packet
|
||||||
// If connectionstate does not exist, send a recv error, if possible, to encourage a fast reconnect
|
// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
|
||||||
if ci == nil {
|
if ci == nil || !ci.window.Check(f.l, h.MessageCounter) {
|
||||||
if addr.IsValid() {
|
if addr != nil {
|
||||||
f.maybeSendRecvError(addr, h.RemoteIndex)
|
f.maybeSendRecvError(addr, h.RemoteIndex)
|
||||||
|
return false
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
return false
|
|
||||||
}
|
|
||||||
// If the window check fails, refuse to process the packet, but don't send a recv error
|
|
||||||
if !ci.window.Check(f.l, h.MessageCounter) {
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
return true
|
||||||
@@ -341,9 +340,8 @@ func newPacket(data []byte, incoming bool, fp *firewall.Packet) error {
|
|||||||
|
|
||||||
// Firewall packets are locally oriented
|
// Firewall packets are locally oriented
|
||||||
if incoming {
|
if incoming {
|
||||||
//TODO: IPV6-WORK
|
fp.RemoteIP = iputil.Ip2VpnIp(data[12:16])
|
||||||
fp.RemoteIP, _ = netip.AddrFromSlice(data[12:16])
|
fp.LocalIP = iputil.Ip2VpnIp(data[16:20])
|
||||||
fp.LocalIP, _ = netip.AddrFromSlice(data[16:20])
|
|
||||||
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
|
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
|
||||||
fp.RemotePort = 0
|
fp.RemotePort = 0
|
||||||
fp.LocalPort = 0
|
fp.LocalPort = 0
|
||||||
@@ -352,9 +350,8 @@ func newPacket(data []byte, incoming bool, fp *firewall.Packet) error {
|
|||||||
fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
|
fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
//TODO: IPV6-WORK
|
fp.LocalIP = iputil.Ip2VpnIp(data[12:16])
|
||||||
fp.LocalIP, _ = netip.AddrFromSlice(data[12:16])
|
fp.RemoteIP = iputil.Ip2VpnIp(data[16:20])
|
||||||
fp.RemoteIP, _ = netip.AddrFromSlice(data[16:20])
|
|
||||||
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
|
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
|
||||||
fp.RemotePort = 0
|
fp.RemotePort = 0
|
||||||
fp.LocalPort = 0
|
fp.LocalPort = 0
|
||||||
@@ -407,7 +404,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
dropReason := f.firewall.Drop(*fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache)
|
dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache)
|
||||||
if dropReason != nil {
|
if dropReason != nil {
|
||||||
// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
|
// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
|
||||||
// This gives us a buffer to build the reject packet in
|
// This gives us a buffer to build the reject packet in
|
||||||
@@ -420,7 +417,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
f.connectionManager.In(hostinfo)
|
f.connectionManager.In(hostinfo.localIndexId)
|
||||||
_, err = f.readers[q].Write(out)
|
_, err = f.readers[q].Write(out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithError(err).Error("Failed to write to tun")
|
f.l.WithError(err).Error("Failed to write to tun")
|
||||||
@@ -428,13 +425,13 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) maybeSendRecvError(endpoint netip.AddrPort, index uint32) {
|
func (f *Interface) maybeSendRecvError(endpoint *udp.Addr, index uint32) {
|
||||||
if f.sendRecvErrorConfig.ShouldSendRecvError(endpoint) {
|
if f.sendRecvErrorConfig.ShouldSendRecvError(endpoint.IP) {
|
||||||
f.sendRecvError(endpoint, index)
|
f.sendRecvError(endpoint, index)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) sendRecvError(endpoint netip.AddrPort, index uint32) {
|
func (f *Interface) sendRecvError(endpoint *udp.Addr, index uint32) {
|
||||||
f.messageMetrics.Tx(header.RecvError, 0, 1)
|
f.messageMetrics.Tx(header.RecvError, 0, 1)
|
||||||
|
|
||||||
//TODO: this should be a signed message so we can trust that we should drop the index
|
//TODO: this should be a signed message so we can trust that we should drop the index
|
||||||
@@ -447,7 +444,7 @@ func (f *Interface) sendRecvError(endpoint netip.AddrPort, index uint32) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) handleRecvError(addr netip.AddrPort, h *header.H) {
|
func (f *Interface) handleRecvError(addr *udp.Addr, h *header.H) {
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
f.l.WithField("index", h.RemoteIndex).
|
f.l.WithField("index", h.RemoteIndex).
|
||||||
WithField("udpAddr", addr).
|
WithField("udpAddr", addr).
|
||||||
@@ -460,7 +457,11 @@ func (f *Interface) handleRecvError(addr netip.AddrPort, h *header.H) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if hostinfo.remote.IsValid() && hostinfo.remote != addr {
|
if !hostinfo.RecvErrorExceeded() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostinfo.remote != nil && !hostinfo.remote.Equals(addr) {
|
||||||
f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
|
f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -2,10 +2,10 @@ package nebula

import (
    "net"
-     "net/netip"
    "testing"

    "github.com/slackhq/nebula/firewall"
+     "github.com/slackhq/nebula/iputil"
    "github.com/stretchr/testify/assert"
    "golang.org/x/net/ipv4"
)
@@ -55,8 +55,8 @@ func Test_newPacket(t *testing.T) {

    assert.Nil(t, err)
    assert.Equal(t, p.Protocol, uint8(firewall.ProtoTCP))
-     assert.Equal(t, p.LocalIP, netip.MustParseAddr("10.0.0.2"))
-     assert.Equal(t, p.RemoteIP, netip.MustParseAddr("10.0.0.1"))
+     assert.Equal(t, p.LocalIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2)))
+     assert.Equal(t, p.RemoteIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1)))
    assert.Equal(t, p.RemotePort, uint16(3))
    assert.Equal(t, p.LocalPort, uint16(4))

@@ -76,8 +76,8 @@ func Test_newPacket(t *testing.T) {

    assert.Nil(t, err)
    assert.Equal(t, p.Protocol, uint8(2))
-     assert.Equal(t, p.LocalIP, netip.MustParseAddr("10.0.0.1"))
-     assert.Equal(t, p.RemoteIP, netip.MustParseAddr("10.0.0.2"))
+     assert.Equal(t, p.LocalIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1)))
+     assert.Equal(t, p.RemoteIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2)))
    assert.Equal(t, p.RemotePort, uint16(6))
    assert.Equal(t, p.LocalPort, uint16(5))
}
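Test_newPacket above expects LocalIP and RemoteIP to come straight out of the IPv4 header's source and destination fields (offsets 12 and 16). The sketch below is illustration only, not the repository's newPacket implementation; it shows both representations the two branches assert against.

package main

import (
    "fmt"
    "net/netip"
)

// addrsFromIPv4Header reads the source and destination fields of a raw IPv4
// header as netip.Addr values, the way the netip branch's firewall packet does.
func addrsFromIPv4Header(data []byte) (src, dst netip.Addr, ok bool) {
    if len(data) < 20 {
        return netip.Addr{}, netip.Addr{}, false
    }
    src, ok1 := netip.AddrFromSlice(data[12:16])
    dst, ok2 := netip.AddrFromSlice(data[16:20])
    return src, dst, ok1 && ok2
}

// vpnIPFromBytes packs the same four bytes into a uint32, which is what the
// iputil.VpnIp branch stores instead.
func vpnIPFromBytes(b []byte) uint32 {
    return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
}

func main() {
    hdr := make([]byte, 20)
    copy(hdr[12:16], []byte{10, 0, 0, 1}) // source 10.0.0.1
    copy(hdr[16:20], []byte{10, 0, 0, 2}) // destination 10.0.0.2
    src, dst, _ := addrsFromIPv4Header(hdr)
    fmt.Println(src, dst, vpnIPFromBytes(hdr[12:16])) // 10.0.0.1 10.0.0.2 167772161
}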
@@ -2,14 +2,16 @@ package overlay

import (
    "io"
-     "net/netip"
+     "net"

+     "github.com/slackhq/nebula/iputil"
)

type Device interface {
    io.ReadWriteCloser
    Activate() error
-     Cidr() netip.Prefix
+     Cidr() *net.IPNet
    Name() string
-     RouteFor(netip.Addr) netip.Addr
+     RouteFor(iputil.VpnIp) iputil.VpnIp
    NewMultiQueueReader() (io.ReadWriteCloser, error)
}
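A throwaway stub can make the shape of the netip flavor of this interface concrete. The type below is hypothetical test scaffolding, not something in the repository; method bodies are deliberately no-ops.

package overlay

import (
    "io"
    "net/netip"
)

// nopDevice is an illustrative no-op implementation of the netip-based Device
// interface above. The embedded ReadWriteCloser supplies Read/Write/Close.
type nopDevice struct {
    io.ReadWriteCloser
    cidr netip.Prefix
}

func (d *nopDevice) Activate() error                   { return nil }
func (d *nopDevice) Cidr() netip.Prefix                { return d.cidr }
func (d *nopDevice) Name() string                      { return "nop0" }
func (d *nopDevice) RouteFor(ip netip.Addr) netip.Addr { return netip.Addr{} }
func (d *nopDevice) NewMultiQueueReader() (io.ReadWriteCloser, error) {
    return d.ReadWriteCloser, nil
}

// Inside the overlay package, `var _ Device = (*nopDevice)(nil)` would verify
// the interface is actually satisfied.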
|||||||
@@ -4,64 +4,38 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"net"
|
"net"
|
||||||
"net/netip"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/gaissmai/bart"
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/slackhq/nebula/cidr"
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Route struct {
|
type Route struct {
|
||||||
MTU int
|
MTU int
|
||||||
Metric int
|
Metric int
|
||||||
Cidr netip.Prefix
|
Cidr *net.IPNet
|
||||||
Via netip.Addr
|
Via *iputil.VpnIp
|
||||||
Install bool
|
Install bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Equal determines if a route that could be installed in the system route table is equal to another
|
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*cidr.Tree4[iputil.VpnIp], error) {
|
||||||
// Via is ignored since that is only consumed within nebula itself
|
routeTree := cidr.NewTree4[iputil.VpnIp]()
|
||||||
func (r Route) Equal(t Route) bool {
|
|
||||||
if r.Cidr != t.Cidr {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if r.Metric != t.Metric {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if r.MTU != t.MTU {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if r.Install != t.Install {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r Route) String() string {
|
|
||||||
s := r.Cidr.String()
|
|
||||||
if r.Metric != 0 {
|
|
||||||
s += fmt.Sprintf(" metric: %v", r.Metric)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*bart.Table[netip.Addr], error) {
|
|
||||||
routeTree := new(bart.Table[netip.Addr])
|
|
||||||
for _, r := range routes {
|
for _, r := range routes {
|
||||||
if !allowMTU && r.MTU > 0 {
|
if !allowMTU && r.MTU > 0 {
|
||||||
l.WithField("route", r).Warnf("route MTU is not supported in %s", runtime.GOOS)
|
l.WithField("route", r).Warnf("route MTU is not supported in %s", runtime.GOOS)
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Via.IsValid() {
|
if r.Via != nil {
|
||||||
routeTree.Insert(r.Cidr, r.Via)
|
routeTree.AddCIDR(r.Cidr, *r.Via)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return routeTree, nil
|
return routeTree, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
func parseRoutes(c *config.C, network *net.IPNet) ([]Route, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
r := c.Get("tun.routes")
|
r := c.Get("tun.routes")
|
||||||
@@ -112,12 +86,12 @@ func parseRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
|||||||
MTU: mtu,
|
MTU: mtu,
|
||||||
}
|
}
|
||||||
|
|
||||||
r.Cidr, err = netip.ParsePrefix(fmt.Sprintf("%v", rRoute))
|
_, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("entry %v.route in tun.routes failed to parse: %v", i+1, err)
|
return nil, fmt.Errorf("entry %v.route in tun.routes failed to parse: %v", i+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !network.Contains(r.Cidr.Addr()) || r.Cidr.Bits() < network.Bits() {
|
if !ipWithin(network, r.Cidr) {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"entry %v.route in tun.routes is not contained within the network attached to the certificate; route: %v, network: %v",
|
"entry %v.route in tun.routes is not contained within the network attached to the certificate; route: %v, network: %v",
|
||||||
i+1,
|
i+1,
|
||||||
@@ -132,7 +106,7 @@ func parseRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
|||||||
return routes, nil
|
return routes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseUnsafeRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
func parseUnsafeRoutes(c *config.C, network *net.IPNet) ([]Route, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
r := c.Get("tun.unsafe_routes")
|
r := c.Get("tun.unsafe_routes")
|
||||||
@@ -198,9 +172,9 @@ func parseUnsafeRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
|||||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not a string: found %T", i+1, rVia)
|
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not a string: found %T", i+1, rVia)
|
||||||
}
|
}
|
||||||
|
|
||||||
viaVpnIp, err := netip.ParseAddr(via)
|
nVia := net.ParseIP(via)
|
||||||
if err != nil {
|
if nVia == nil {
|
||||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes failed to parse address: %v", i+1, err)
|
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes failed to parse address: %v", i+1, via)
|
||||||
}
|
}
|
||||||
|
|
||||||
rRoute, ok := m["route"]
|
rRoute, ok := m["route"]
|
||||||
@@ -208,6 +182,8 @@ func parseUnsafeRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
|||||||
return nil, fmt.Errorf("entry %v.route in tun.unsafe_routes is not present", i+1)
|
return nil, fmt.Errorf("entry %v.route in tun.unsafe_routes is not present", i+1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
viaVpnIp := iputil.Ip2VpnIp(nVia)
|
||||||
|
|
||||||
install := true
|
install := true
|
||||||
rInstall, ok := m["install"]
|
rInstall, ok := m["install"]
|
||||||
if ok {
|
if ok {
|
||||||
@@ -218,18 +194,18 @@ func parseUnsafeRoutes(c *config.C, network netip.Prefix) ([]Route, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r := Route{
|
r := Route{
|
||||||
Via: viaVpnIp,
|
Via: &viaVpnIp,
|
||||||
MTU: mtu,
|
MTU: mtu,
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Install: install,
|
Install: install,
|
||||||
}
|
}
|
||||||
|
|
||||||
r.Cidr, err = netip.ParsePrefix(fmt.Sprintf("%v", rRoute))
|
_, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("entry %v.route in tun.unsafe_routes failed to parse: %v", i+1, err)
|
return nil, fmt.Errorf("entry %v.route in tun.unsafe_routes failed to parse: %v", i+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if network.Contains(r.Cidr.Addr()) {
|
if ipWithin(network, r.Cidr) {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"entry %v.route in tun.unsafe_routes is contained within the network attached to the certificate; route: %v, network: %v",
|
"entry %v.route in tun.unsafe_routes is contained within the network attached to the certificate; route: %v, network: %v",
|
||||||
i+1,
|
i+1,
|
||||||
|
|||||||
@@ -2,10 +2,11 @@ package overlay
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/netip"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
|
"github.com/slackhq/nebula/iputil"
|
||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
@@ -13,8 +14,7 @@ import (
|
|||||||
func Test_parseRoutes(t *testing.T) {
|
func Test_parseRoutes(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
n, err := netip.ParsePrefix("10.0.0.0/24")
|
_, n, _ := net.ParseCIDR("10.0.0.0/24")
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// test no routes config
|
// test no routes config
|
||||||
routes, err := parseRoutes(c, n)
|
routes, err := parseRoutes(c, n)
|
||||||
@@ -67,7 +67,7 @@ func Test_parseRoutes(t *testing.T) {
|
|||||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "nope"}}}
|
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "nope"}}}
|
||||||
routes, err = parseRoutes(c, n)
|
routes, err = parseRoutes(c, n)
|
||||||
assert.Nil(t, routes)
|
assert.Nil(t, routes)
|
||||||
assert.EqualError(t, err, "entry 1.route in tun.routes failed to parse: netip.ParsePrefix(\"nope\"): no '/'")
|
assert.EqualError(t, err, "entry 1.route in tun.routes failed to parse: invalid CIDR address: nope")
|
||||||
|
|
||||||
// below network range
|
// below network range
|
||||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "1.0.0.0/8"}}}
|
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "1.0.0.0/8"}}}
|
||||||
@@ -112,8 +112,7 @@ func Test_parseRoutes(t *testing.T) {
|
|||||||
func Test_parseUnsafeRoutes(t *testing.T) {
|
func Test_parseUnsafeRoutes(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
c := config.NewC(l)
|
c := config.NewC(l)
|
||||||
n, err := netip.ParsePrefix("10.0.0.0/24")
|
_, n, _ := net.ParseCIDR("10.0.0.0/24")
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// test no routes config
|
// test no routes config
|
||||||
routes, err := parseUnsafeRoutes(c, n)
|
routes, err := parseUnsafeRoutes(c, n)
|
||||||
@@ -158,7 +157,7 @@ func Test_parseUnsafeRoutes(t *testing.T) {
|
|||||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "via": "nope"}}}
|
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "via": "nope"}}}
|
||||||
routes, err = parseUnsafeRoutes(c, n)
|
routes, err = parseUnsafeRoutes(c, n)
|
||||||
assert.Nil(t, routes)
|
assert.Nil(t, routes)
|
||||||
assert.EqualError(t, err, "entry 1.via in tun.unsafe_routes failed to parse address: ParseAddr(\"nope\"): unable to parse IP")
|
assert.EqualError(t, err, "entry 1.via in tun.unsafe_routes failed to parse address: nope")
|
||||||
|
|
||||||
// missing route
|
// missing route
|
||||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500"}}}
|
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500"}}}
|
||||||
@@ -170,7 +169,7 @@ func Test_parseUnsafeRoutes(t *testing.T) {
|
|||||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500", "route": "nope"}}}
|
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500", "route": "nope"}}}
|
||||||
routes, err = parseUnsafeRoutes(c, n)
|
routes, err = parseUnsafeRoutes(c, n)
|
||||||
assert.Nil(t, routes)
|
assert.Nil(t, routes)
|
||||||
assert.EqualError(t, err, "entry 1.route in tun.unsafe_routes failed to parse: netip.ParsePrefix(\"nope\"): no '/'")
|
assert.EqualError(t, err, "entry 1.route in tun.unsafe_routes failed to parse: invalid CIDR address: nope")
|
||||||
|
|
||||||
// within network range
|
// within network range
|
||||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "10.0.0.0/24"}}}
|
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "10.0.0.0/24"}}}
|
||||||
@@ -253,8 +252,7 @@ func Test_parseUnsafeRoutes(t *testing.T) {
func Test_makeRouteTree(t *testing.T) {
    l := test.NewLogger()
    c := config.NewC(l)
-     n, err := netip.ParsePrefix("10.0.0.0/24")
-     assert.NoError(t, err)
+     _, n, _ := net.ParseCIDR("10.0.0.0/24")

    c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{
        map[interface{}]interface{}{"via": "192.168.0.1", "route": "1.0.0.0/28"},
@@ -266,26 +264,17 @@ func Test_makeRouteTree(t *testing.T) {
    routeTree, err := makeRouteTree(l, routes, true)
    assert.NoError(t, err)

-     ip, err := netip.ParseAddr("1.0.0.2")
-     assert.NoError(t, err)
-     r, ok := routeTree.Lookup(ip)
+     ip := iputil.Ip2VpnIp(net.ParseIP("1.0.0.2"))
+     ok, r := routeTree.MostSpecificContains(ip)
    assert.True(t, ok)
+     assert.Equal(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.1")), r)

-     nip, err := netip.ParseAddr("192.168.0.1")
-     assert.NoError(t, err)
-     assert.Equal(t, nip, r)
-
-     ip, err = netip.ParseAddr("1.0.0.1")
-     assert.NoError(t, err)
-     r, ok = routeTree.Lookup(ip)
+     ip = iputil.Ip2VpnIp(net.ParseIP("1.0.0.1"))
+     ok, r = routeTree.MostSpecificContains(ip)
    assert.True(t, ok)
+     assert.Equal(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.2")), r)

-     nip, err = netip.ParseAddr("192.168.0.2")
-     assert.NoError(t, err)
-     assert.Equal(t, nip, r)
-
-     ip, err = netip.ParseAddr("1.1.0.1")
-     assert.NoError(t, err)
-     r, ok = routeTree.Lookup(ip)
+     ip = iputil.Ip2VpnIp(net.ParseIP("1.1.0.1"))
+     ok, r = routeTree.MostSpecificContains(ip)
    assert.False(t, ok)
}
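Both assertion styles in the hunk above exercise the same longest-prefix behaviour; only the lookup API differs (bart.Table.Lookup returns value-then-ok, cidr.Tree4.MostSpecificContains returns ok-then-value). A runnable sketch against the bart table, with routes made up to mirror the test's assertions:

package main

import (
    "fmt"
    "net/netip"

    "github.com/gaissmai/bart"
)

func main() {
    // Same shape as the netip branch's makeRouteTree: prefixes map to gateways.
    routeTree := new(bart.Table[netip.Addr])
    routeTree.Insert(netip.MustParsePrefix("1.0.0.0/28"), netip.MustParseAddr("192.168.0.1"))
    routeTree.Insert(netip.MustParsePrefix("1.0.0.1/32"), netip.MustParseAddr("192.168.0.2"))

    gw, ok := routeTree.Lookup(netip.MustParseAddr("1.0.0.2"))
    fmt.Println(gw, ok) // 192.168.0.1 true: the /28 is the most specific match

    gw, ok = routeTree.Lookup(netip.MustParseAddr("1.1.0.1"))
    fmt.Println(gw, ok) // invalid IP false: nothing covers this address
}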
|||||||
@@ -1,7 +1,7 @@
|
|||||||
package overlay
|
package overlay
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net/netip"
|
"net"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
@@ -10,63 +10,60 @@ import (
|
|||||||
|
|
||||||
const DefaultMTU = 1300
|
const DefaultMTU = 1300
|
||||||
|
|
||||||
// TODO: We may be able to remove routines
|
type DeviceFactory func(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error)
|
||||||
type DeviceFactory func(c *config.C, l *logrus.Logger, tunCidr netip.Prefix, routines int) (Device, error)
|
|
||||||
|
func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error) {
|
||||||
|
routes, err := parseRoutes(c, tunCidr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, util.NewContextualError("Could not parse tun.routes", nil, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
unsafeRoutes, err := parseUnsafeRoutes(c, tunCidr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, util.NewContextualError("Could not parse tun.unsafe_routes", nil, err)
|
||||||
|
}
|
||||||
|
routes = append(routes, unsafeRoutes...)
|
||||||
|
|
||||||
func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr netip.Prefix, routines int) (Device, error) {
|
|
||||||
switch {
|
switch {
|
||||||
case c.GetBool("tun.disabled", false):
|
case c.GetBool("tun.disabled", false):
|
||||||
tun := newDisabledTun(tunCidr, c.GetInt("tun.tx_queue", 500), c.GetBool("stats.message_metrics", false), l)
|
tun := newDisabledTun(tunCidr, c.GetInt("tun.tx_queue", 500), c.GetBool("stats.message_metrics", false), l)
|
||||||
return tun, nil
|
return tun, nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return newTun(c, l, tunCidr, routines > 1)
|
return newTun(
|
||||||
|
l,
|
||||||
|
c.GetString("tun.dev", ""),
|
||||||
|
tunCidr,
|
||||||
|
c.GetInt("tun.mtu", DefaultMTU),
|
||||||
|
routes,
|
||||||
|
c.GetInt("tun.tx_queue", 500),
|
||||||
|
routines > 1,
|
||||||
|
c.GetBool("tun.use_system_route_table", false),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFdDeviceFromConfig(fd *int) DeviceFactory {
|
func NewFdDeviceFromConfig(fd *int) DeviceFactory {
|
||||||
return func(c *config.C, l *logrus.Logger, tunCidr netip.Prefix, routines int) (Device, error) {
|
return func(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error) {
|
||||||
return newTunFromFd(c, l, *fd, tunCidr)
|
routes, err := parseRoutes(c, tunCidr)
|
||||||
}
|
if err != nil {
|
||||||
}
|
return nil, util.NewContextualError("Could not parse tun.routes", nil, err)
|
||||||
|
|
||||||
func getAllRoutesFromConfig(c *config.C, cidr netip.Prefix, initial bool) (bool, []Route, error) {
|
|
||||||
if !initial && !c.HasChanged("tun.routes") && !c.HasChanged("tun.unsafe_routes") {
|
|
||||||
return false, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
routes, err := parseRoutes(c, cidr)
|
|
||||||
if err != nil {
|
|
||||||
return true, nil, util.NewContextualError("Could not parse tun.routes", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafeRoutes, err := parseUnsafeRoutes(c, cidr)
|
|
||||||
if err != nil {
|
|
||||||
return true, nil, util.NewContextualError("Could not parse tun.unsafe_routes", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
routes = append(routes, unsafeRoutes...)
|
|
||||||
return true, routes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findRemovedRoutes will return all routes that are not present in the newRoutes list and would affect the system route table.
|
|
||||||
// Via is not used to evaluate since it does not affect the system route table.
|
|
||||||
func findRemovedRoutes(newRoutes, oldRoutes []Route) []Route {
|
|
||||||
var removed []Route
|
|
||||||
has := func(entry Route) bool {
|
|
||||||
for _, check := range newRoutes {
|
|
||||||
if check.Equal(entry) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, oldEntry := range oldRoutes {
|
unsafeRoutes, err := parseUnsafeRoutes(c, tunCidr)
|
||||||
if !has(oldEntry) {
|
if err != nil {
|
||||||
removed = append(removed, oldEntry)
|
return nil, util.NewContextualError("Could not parse tun.unsafe_routes", nil, err)
|
||||||
}
|
}
|
||||||
}
|
routes = append(routes, unsafeRoutes...)
|
||||||
|
return newTunFromFd(
|
||||||
|
l,
|
||||||
|
*fd,
|
||||||
|
tunCidr,
|
||||||
|
c.GetInt("tun.mtu", DefaultMTU),
|
||||||
|
routes,
|
||||||
|
c.GetInt("tun.tx_queue", 500),
|
||||||
|
c.GetBool("tun.use_system_route_table", false),
|
||||||
|
)
|
||||||
|
|
||||||
return removed
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,58 +6,47 @@ package overlay
 import (
     "fmt"
     "io"
-    "net/netip"
+    "net"
     "os"
-    "sync/atomic"

-    "github.com/gaissmai/bart"
     "github.com/sirupsen/logrus"
-    "github.com/slackhq/nebula/config"
-    "github.com/slackhq/nebula/util"
+    "github.com/slackhq/nebula/cidr"
+    "github.com/slackhq/nebula/iputil"
 )

 type tun struct {
     io.ReadWriteCloser
     fd int
-    cidr      netip.Prefix
-    Routes    atomic.Pointer[[]Route]
-    routeTree atomic.Pointer[bart.Table[netip.Addr]]
+    cidr      *net.IPNet
+    routeTree *cidr.Tree4[iputil.VpnIp]
     l *logrus.Logger
 }

-func newTunFromFd(c *config.C, l *logrus.Logger, deviceFd int, cidr netip.Prefix) (*tun, error) {
-    // XXX Android returns an fd in non-blocking mode which is necessary for shutdown to work properly.
-    // Be sure not to call file.Fd() as it will set the fd to blocking mode.
-    file := os.NewFile(uintptr(deviceFd), "/dev/net/tun")
+func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes []Route, _ int, _ bool) (*tun, error) {
+    routeTree, err := makeRouteTree(l, routes, false)

-    t := &tun{
-        ReadWriteCloser: file,
-        fd:              deviceFd,
-        cidr:            cidr,
-        l:               l,
-    }
-
-    err := t.reload(c, true)
     if err != nil {
         return nil, err
     }

-    c.RegisterReloadCallback(func(c *config.C) {
-        err := t.reload(c, false)
-        if err != nil {
-            util.LogWithContextIfNeeded("failed to reload tun device", err, t.l)
-        }
-    })
-
-    return t, nil
+    // XXX Android returns an fd in non-blocking mode which is necessary for shutdown to work properly.
+    // Be sure not to call file.Fd() as it will set the fd to blocking mode.
+    file := os.NewFile(uintptr(deviceFd), "/dev/net/tun")
+
+    return &tun{
+        ReadWriteCloser: file,
+        fd:              deviceFd,
+        cidr:            cidr,
+        routeTree:       routeTree,
+        l:               l,
+    }, nil
 }

-func newTun(_ *config.C, _ *logrus.Logger, _ netip.Prefix, _ bool) (*tun, error) {
+func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _ bool, _ bool) (*tun, error) {
     return nil, fmt.Errorf("newTun not supported in Android")
 }

-func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
-    r, _ := t.routeTree.Load().Lookup(ip)
+func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
+    _, r := t.routeTree.MostSpecificContains(ip)
     return r
 }

@@ -65,28 +54,7 @@ func (t tun) Activate() error {
     return nil
 }

-func (t *tun) reload(c *config.C, initial bool) error {
-    change, routes, err := getAllRoutesFromConfig(c, t.cidr, initial)
-    if err != nil {
-        return err
-    }
-
-    if !initial && !change {
-        return nil
-    }
-
-    routeTree, err := makeRouteTree(t.l, routes, false)
-    if err != nil {
-        return err
-    }
-
-    // Teach nebula how to handle the routes
-    t.Routes.Store(&routes)
-    t.routeTree.Store(routeTree)
-    return nil
-}
-
-func (t *tun) Cidr() netip.Prefix {
+func (t *tun) Cidr() *net.IPNet {
     return t.cidr
 }

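These Android hunks swap the lookup structure behind RouteFor: the *net.IPNet variant queries a cidr.Tree4[iputil.VpnIp] with MostSpecificContains, while the netip.Prefix variant keeps a bart.Table[netip.Addr] behind an atomic.Pointer and calls Lookup. Below is a small longest-prefix-match sketch assuming the github.com/gaissmai/bart Insert and Lookup signatures exactly as the hunk uses them; the prefixes and gateway addresses are invented for illustration.

    package main

    import (
        "fmt"
        "net/netip"

        "github.com/gaissmai/bart"
    )

    func main() {
        // Route table keyed by prefix, storing the gateway ("via") address,
        // in the style of routeTree atomic.Pointer[bart.Table[netip.Addr]] above.
        var table bart.Table[netip.Addr]
        table.Insert(netip.MustParsePrefix("192.168.0.0/16"), netip.MustParseAddr("10.0.0.1"))
        table.Insert(netip.MustParsePrefix("192.168.10.0/24"), netip.MustParseAddr("10.0.0.2"))

        // Lookup performs a longest-prefix match, the same role
        // cidr.Tree4.MostSpecificContains plays on the other side of the diff.
        via, ok := table.Lookup(netip.MustParseAddr("192.168.10.7"))
        fmt.Println(via, ok) // 10.0.0.2 true

        via, ok = table.Lookup(netip.MustParseAddr("172.16.0.1"))
        fmt.Println(via.IsValid(), ok) // false false
    }

In both versions the answer is the same kind of thing: the VPN address an unsafe-route packet should be sent via, with a zero value meaning no matching route.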
@@ -8,16 +8,13 @@ import (
     "fmt"
     "io"
     "net"
-    "net/netip"
     "os"
-    "sync/atomic"
     "syscall"
     "unsafe"

-    "github.com/gaissmai/bart"
     "github.com/sirupsen/logrus"
-    "github.com/slackhq/nebula/config"
-    "github.com/slackhq/nebula/util"
+    "github.com/slackhq/nebula/cidr"
+    "github.com/slackhq/nebula/iputil"
     netroute "golang.org/x/net/route"
     "golang.org/x/sys/unix"
 )
@@ -25,11 +22,10 @@ import (
 type tun struct {
     io.ReadWriteCloser
     Device     string
-    cidr       netip.Prefix
+    cidr       *net.IPNet
     DefaultMTU int
-    Routes     atomic.Pointer[[]Route]
-    routeTree  atomic.Pointer[bart.Table[netip.Addr]]
-    linkAddr   *netroute.LinkAddr
+    Routes     []Route
+    routeTree  *cidr.Tree4[iputil.VpnIp]
     l          *logrus.Logger

     // cache out buffer since we need to prepend 4 bytes for tun metadata
@@ -73,8 +69,12 @@ type ifreqMTU struct {
     pad [8]byte
 }

-func newTun(c *config.C, l *logrus.Logger, cidr netip.Prefix, _ bool) (*tun, error) {
-    name := c.GetString("tun.dev", "")
+func newTun(l *logrus.Logger, name string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool, _ bool) (*tun, error) {
+    routeTree, err := makeRouteTree(l, routes, false)
+    if err != nil {
+        return nil, err
+    }

     ifIndex := -1
     if name != "" && name != "utun" {
         _, err := fmt.Sscanf(name, "utun%d", &ifIndex)
@@ -142,27 +142,17 @@ func newTun(c *config.C, l *logrus.Logger, cidr netip.Prefix, _ bool) (*tun, err

     file := os.NewFile(uintptr(fd), "")

-    t := &tun{
+    tun := &tun{
         ReadWriteCloser: file,
         Device:          name,
         cidr:            cidr,
-        DefaultMTU:      c.GetInt("tun.mtu", DefaultMTU),
+        DefaultMTU:      defaultMTU,
+        Routes:          routes,
+        routeTree:       routeTree,
         l:               l,
     }

-    err = t.reload(c, true)
-    if err != nil {
-        return nil, err
-    }
-
-    c.RegisterReloadCallback(func(c *config.C) {
-        err := t.reload(c, false)
-        if err != nil {
-            util.LogWithContextIfNeeded("failed to reload tun device", err, t.l)
-        }
-    })
-
-    return t, nil
+    return tun, nil
 }

 func (t *tun) deviceBytes() (o [16]byte) {
@@ -172,7 +162,7 @@ func (t *tun) deviceBytes() (o [16]byte) {
     return
 }

-func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ netip.Prefix) (*tun, error) {
+func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) {
     return nil, fmt.Errorf("newTunFromFd not supported in Darwin")
 }

@@ -188,13 +178,8 @@ func (t *tun) Activate() error {

     var addr, mask [4]byte

-    if !t.cidr.Addr().Is4() {
-        //TODO: IPV6-WORK
-        panic("need ipv6")
-    }
-
-    addr = t.cidr.Addr().As4()
-    copy(mask[:], prefixToMask(t.cidr))
+    copy(addr[:], t.cidr.IP.To4())
+    copy(mask[:], t.cidr.Mask)

     s, err := unix.Socket(
         unix.AF_INET,
@@ -275,7 +260,6 @@ func (t *tun) Activate() error {
     if linkAddr == nil {
         return fmt.Errorf("unable to discover link_addr for tun interface")
     }
-    t.linkAddr = linkAddr

     copy(routeAddr.IP[:], addr[:])
     copy(maskAddr.IP[:], mask[:])
@@ -294,52 +278,38 @@ func (t *tun) Activate() error {
     }

     // Unsafe path routes
-    return t.addRoutes(false)
-}
-
-func (t *tun) reload(c *config.C, initial bool) error {
-    change, routes, err := getAllRoutesFromConfig(c, t.cidr, initial)
-    if err != nil {
-        return err
-    }
-
-    if !initial && !change {
-        return nil
-    }
-
-    routeTree, err := makeRouteTree(t.l, routes, false)
-    if err != nil {
-        return err
-    }
-
-    // Teach nebula how to handle the routes before establishing them in the system table
-    oldRoutes := t.Routes.Swap(&routes)
-    t.routeTree.Store(routeTree)
-
-    if !initial {
-        // Remove first, if the system removes a wanted route hopefully it will be re-added next
-        err := t.removeRoutes(findRemovedRoutes(routes, *oldRoutes))
-        if err != nil {
-            util.LogWithContextIfNeeded("Failed to remove routes", err, t.l)
+    for _, r := range t.Routes {
+        if r.Via == nil || !r.Install {
+            // We don't allow route MTUs so only install routes with a via
+            continue
         }

-        // Ensure any routes we actually want are installed
-        err = t.addRoutes(true)
+        copy(routeAddr.IP[:], r.Cidr.IP.To4())
+        copy(maskAddr.IP[:], net.IP(r.Cidr.Mask).To4())
+
+        err = addRoute(routeSock, routeAddr, maskAddr, linkAddr)
         if err != nil {
-            // Catch any stray logs
-            util.LogWithContextIfNeeded("Failed to add routes", err, t.l)
+            if errors.Is(err, unix.EEXIST) {
+                t.l.WithField("route", r.Cidr).
+                    Warnf("unable to add unsafe_route, identical route already exists")
+            } else {
+                return err
+            }
         }

+        // TODO how to set metric
     }

     return nil
 }

-func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
-    r, ok := t.routeTree.Load().Lookup(ip)
+func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
+    ok, r := t.routeTree.MostSpecificContains(ip)
     if ok {
         return r
     }
-    return netip.Addr{}
+    return 0
 }

 // Get the LinkAddr for the interface of the given name
@@ -370,99 +340,6 @@ func getLinkAddr(name string) (*netroute.LinkAddr, error) {
     return nil, nil
 }

-func (t *tun) addRoutes(logErrors bool) error {
-    routeSock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
-    if err != nil {
-        return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
-    }
-
-    defer func() {
-        unix.Shutdown(routeSock, unix.SHUT_RDWR)
-        err := unix.Close(routeSock)
-        if err != nil {
-            t.l.WithError(err).Error("failed to close AF_ROUTE socket")
-        }
-    }()
-
-    routeAddr := &netroute.Inet4Addr{}
-    maskAddr := &netroute.Inet4Addr{}
-    routes := *t.Routes.Load()
-    for _, r := range routes {
-        if !r.Via.IsValid() || !r.Install {
-            // We don't allow route MTUs so only install routes with a via
-            continue
-        }
-
-        if !r.Cidr.Addr().Is4() {
-            //TODO: implement ipv6
-            panic("Cant handle ipv6 routes yet")
-        }
-
-        routeAddr.IP = r.Cidr.Addr().As4()
-        //TODO: we could avoid the copy
-        copy(maskAddr.IP[:], prefixToMask(r.Cidr))
-
-        err := addRoute(routeSock, routeAddr, maskAddr, t.linkAddr)
-        if err != nil {
-            if errors.Is(err, unix.EEXIST) {
-                t.l.WithField("route", r.Cidr).
-                    Warnf("unable to add unsafe_route, identical route already exists")
-            } else {
-                retErr := util.NewContextualError("Failed to add route", map[string]interface{}{"route": r}, err)
-                if logErrors {
-                    retErr.Log(t.l)
-                } else {
-                    return retErr
-                }
-            }
-        } else {
-            t.l.WithField("route", r).Info("Added route")
-        }
-    }
-
-    return nil
-}
-
-func (t *tun) removeRoutes(routes []Route) error {
-    routeSock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
-    if err != nil {
-        return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
-    }
-
-    defer func() {
-        unix.Shutdown(routeSock, unix.SHUT_RDWR)
-        err := unix.Close(routeSock)
-        if err != nil {
-            t.l.WithError(err).Error("failed to close AF_ROUTE socket")
-        }
-    }()
-
-    routeAddr := &netroute.Inet4Addr{}
-    maskAddr := &netroute.Inet4Addr{}
-
-    for _, r := range routes {
-        if !r.Install {
-            continue
-        }
-
-        if r.Cidr.Addr().Is6() {
-            //TODO: implement ipv6
-            panic("Cant handle ipv6 routes yet")
-        }
-
-        routeAddr.IP = r.Cidr.Addr().As4()
-        copy(maskAddr.IP[:], prefixToMask(r.Cidr))
-
-        err := delRoute(routeSock, routeAddr, maskAddr, t.linkAddr)
-        if err != nil {
-            t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
-        } else {
-            t.l.WithField("route", r).Info("Removed route")
-        }
-    }
-    return nil
-}
-
 func addRoute(sock int, addr, mask *netroute.Inet4Addr, link *netroute.LinkAddr) error {
     r := netroute.RouteMessage{
         Version: unix.RTM_VERSION,
@@ -488,30 +365,6 @@ func addRoute(sock int, addr, mask *netroute.Inet4Addr, link *netroute.LinkAddr)
     return nil
 }

-func delRoute(sock int, addr, mask *netroute.Inet4Addr, link *netroute.LinkAddr) error {
-    r := netroute.RouteMessage{
-        Version: unix.RTM_VERSION,
-        Type:    unix.RTM_DELETE,
-        Seq:     1,
-        Addrs: []netroute.Addr{
-            unix.RTAX_DST:     addr,
-            unix.RTAX_GATEWAY: link,
-            unix.RTAX_NETMASK: mask,
-        },
-    }
-
-    data, err := r.Marshal()
-    if err != nil {
-        return fmt.Errorf("failed to create route.RouteMessage: %w", err)
-    }
-    _, err = unix.Write(sock, data[:])
-    if err != nil {
-        return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
-    }
-
-    return nil
-}
-
 func (t *tun) Read(to []byte) (int, error) {

     buf := make([]byte, len(to)+4)
@@ -551,7 +404,7 @@ func (t *tun) Write(from []byte) (int, error) {
     return n - 4, err
 }

-func (t *tun) Cidr() netip.Prefix {
+func (t *tun) Cidr() *net.IPNet {
     return t.cidr
 }

@@ -562,11 +415,3 @@ func (t *tun) Name() string {
 func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
     return nil, fmt.Errorf("TODO: multiqueue not implemented for darwin")
 }
-
-func prefixToMask(prefix netip.Prefix) []byte {
-    pLen := 128
-    if prefix.Addr().Is4() {
-        pLen = 32
-    }
-    return net.CIDRMask(prefix.Bits(), pLen)
-}
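On the netip.Prefix side of the Darwin hunks, Routes and routeTree live behind sync/atomic Pointer values: a reload builds a fresh table, publishes it with Swap and Store, and then uses the returned old slice (via findRemovedRoutes) to prune the system route table, while readers keep whatever pointer they already loaded. Below is a self-contained sketch of that publish-and-swap mechanic; tunLike and routeTable are invented stand-ins (a plain map rather than a bart table), not the repository's types.

    package main

    import (
        "fmt"
        "net/netip"
        "sync/atomic"
    )

    // routeTable is a placeholder for the real lookup structure; the point of
    // the sketch is the pointer swap, not the lookup itself.
    type routeTable map[netip.Prefix]netip.Addr

    type tunLike struct {
        routes    atomic.Pointer[[]netip.Prefix]
        routeTree atomic.Pointer[routeTable]
    }

    // reload publishes a freshly built table; callers that already did Load keep
    // using the previous one, and the returned slice is what a reload would now
    // consider for removal from the system route table.
    func (t *tunLike) reload(prefixes []netip.Prefix, via netip.Addr) []netip.Prefix {
        table := routeTable{}
        for _, p := range prefixes {
            table[p] = via
        }
        old := t.routes.Swap(&prefixes)
        t.routeTree.Store(&table)
        if old == nil {
            return nil
        }
        return *old
    }

    func main() {
        t := &tunLike{}
        t.reload([]netip.Prefix{netip.MustParsePrefix("10.1.0.0/16")}, netip.MustParseAddr("10.0.0.1"))
        old := t.reload([]netip.Prefix{netip.MustParsePrefix("10.2.0.0/16")}, netip.MustParseAddr("10.0.0.1"))
        fmt.Println(old)                  // [10.1.0.0/16]
        fmt.Println(*t.routeTree.Load())  // map[10.2.0.0/16:10.0.0.1]
    }

Because RouteFor only ever calls routeTree.Load(), it stays lock-free while a reload replaces the table underneath it.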
@@ -3,7 +3,7 @@ package overlay
 import (
     "fmt"
     "io"
-    "net/netip"
+    "net"
     "strings"

     "github.com/rcrowley/go-metrics"
@@ -13,7 +13,7 @@ import (

 type disabledTun struct {
     read chan []byte
-    cidr netip.Prefix
+    cidr *net.IPNet

     // Track these metrics since we don't have the tun device to do it for us
     tx metrics.Counter
@@ -21,7 +21,7 @@ type disabledTun struct {
     l *logrus.Logger
 }

-func newDisabledTun(cidr netip.Prefix, queueLen int, metricsEnabled bool, l *logrus.Logger) *disabledTun {
+func newDisabledTun(cidr *net.IPNet, queueLen int, metricsEnabled bool, l *logrus.Logger) *disabledTun {
     tun := &disabledTun{
         cidr: cidr,
         read: make(chan []byte, queueLen),
@@ -43,11 +43,11 @@ func (*disabledTun) Activate() error {
     return nil
 }

-func (*disabledTun) RouteFor(addr netip.Addr) netip.Addr {
-    return netip.Addr{}
+func (*disabledTun) RouteFor(iputil.VpnIp) iputil.VpnIp {
+    return 0
 }

-func (t *disabledTun) Cidr() netip.Prefix {
+func (t *disabledTun) Cidr() *net.IPNet {
     return t.cidr
 }

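In the disabled tun, the visible effect of the type swap is the "no route" sentinel: the iputil.VpnIp version of RouteFor returns 0, while the netip version returns the zero netip.Addr, which callers can test with IsValid. A tiny sketch follows; routeFor is a made-up stand-in, not the repository's method.

    package main

    import (
        "fmt"
        "net/netip"
    )

    // routeFor mimics the disabled tun: it never has a route, so it returns the
    // zero netip.Addr where the older code returned VpnIp(0).
    func routeFor(_ netip.Addr) netip.Addr {
        return netip.Addr{}
    }

    func main() {
        r := routeFor(netip.MustParseAddr("10.0.0.1"))
        // The zero Addr reports IsValid() == false, which is the "no route" check.
        fmt.Println(r.IsValid()) // false
    }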
@@ -9,18 +9,16 @@ import (
     "fmt"
     "io"
     "io/fs"
-    "net/netip"
+    "net"
     "os"
     "os/exec"
     "strconv"
-    "sync/atomic"
     "syscall"
     "unsafe"

-    "github.com/gaissmai/bart"
     "github.com/sirupsen/logrus"
-    "github.com/slackhq/nebula/config"
-    "github.com/slackhq/nebula/util"
+    "github.com/slackhq/nebula/cidr"
+    "github.com/slackhq/nebula/iputil"
 )

 const (
@@ -47,10 +45,10 @@ type ifreqDestroy struct {

 type tun struct {
     Device    string
-    cidr      netip.Prefix
+    cidr      *net.IPNet
     MTU       int
-    Routes    atomic.Pointer[[]Route]
-    routeTree atomic.Pointer[bart.Table[netip.Addr]]
+    Routes    []Route
+    routeTree *cidr.Tree4[iputil.VpnIp]
     l         *logrus.Logger

     io.ReadWriteCloser
@@ -78,15 +76,14 @@ func (t *tun) Close() error {
     return nil
 }

-func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ netip.Prefix) (*tun, error) {
+func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) {
     return nil, fmt.Errorf("newTunFromFd not supported in FreeBSD")
 }

-func newTun(c *config.C, l *logrus.Logger, cidr netip.Prefix, _ bool) (*tun, error) {
+func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool, _ bool) (*tun, error) {
     // Try to open existing tun device
     var file *os.File
     var err error
-    deviceName := c.GetString("tun.dev", "")
     if deviceName != "" {
         file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
     }
@@ -147,97 +144,59 @@ func newTun(c *config.C, l *logrus.Logger, cidr netip.Prefix, _ bool) (*tun, err
         ioctl(fd, syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&ifrr)))
     }

-    t := &tun{
-        ReadWriteCloser: file,
-        Device:          deviceName,
-        cidr:            cidr,
-        MTU:             c.GetInt("tun.mtu", DefaultMTU),
-        l:               l,
-    }
-
-    err = t.reload(c, true)
+    routeTree, err := makeRouteTree(l, routes, false)
     if err != nil {
         return nil, err
     }

-    c.RegisterReloadCallback(func(c *config.C) {
-        err := t.reload(c, false)
-        if err != nil {
-            util.LogWithContextIfNeeded("failed to reload tun device", err, t.l)
-        }
-    })
-
-    return t, nil
+    return &tun{
+        ReadWriteCloser: file,
+        Device:          deviceName,
+        cidr:            cidr,
+        MTU:             defaultMTU,
+        Routes:          routes,
+        routeTree:       routeTree,
+        l:               l,
+    }, nil
 }

 func (t *tun) Activate() error {
     var err error
     // TODO use syscalls instead of exec.Command
-    cmd := exec.Command("/sbin/ifconfig", t.Device, t.cidr.String(), t.cidr.Addr().String())
-    t.l.Debug("command: ", cmd.String())
-    if err = cmd.Run(); err != nil {
+    t.l.Debug("command: ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String())
+    if err = exec.Command("/sbin/ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String()).Run(); err != nil {
         return fmt.Errorf("failed to run 'ifconfig': %s", err)
     }
-    cmd = exec.Command("/sbin/route", "-n", "add", "-net", t.cidr.String(), "-interface", t.Device)
-    t.l.Debug("command: ", cmd.String())
-    if err = cmd.Run(); err != nil {
+    t.l.Debug("command: route", "-n", "add", "-net", t.cidr.String(), "-interface", t.Device)
+    if err = exec.Command("/sbin/route", "-n", "add", "-net", t.cidr.String(), "-interface", t.Device).Run(); err != nil {
        return fmt.Errorf("failed to run 'route add': %s", err)
     }
-    cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
-    t.l.Debug("command: ", cmd.String())
-    if err = cmd.Run(); err != nil {
+    t.l.Debug("command: ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
+    if err = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU)).Run(); err != nil {
         return fmt.Errorf("failed to run 'ifconfig': %s", err)
     }

     // Unsafe path routes
-    return t.addRoutes(false)
-}
-
-func (t *tun) reload(c *config.C, initial bool) error {
-    change, routes, err := getAllRoutesFromConfig(c, t.cidr, initial)
-    if err != nil {
-        return err
-    }
-
-    if !initial && !change {
-        return nil
-    }
-
-    routeTree, err := makeRouteTree(t.l, routes, false)
-    if err != nil {
-        return err
-    }
-
-    // Teach nebula how to handle the routes before establishing them in the system table
-    oldRoutes := t.Routes.Swap(&routes)
-    t.routeTree.Store(routeTree)
-
-    if !initial {
-        // Remove first, if the system removes a wanted route hopefully it will be re-added next
-        err := t.removeRoutes(findRemovedRoutes(routes, *oldRoutes))
-        if err != nil {
-            util.LogWithContextIfNeeded("Failed to remove routes", err, t.l)
+    for _, r := range t.Routes {
+        if r.Via == nil || !r.Install {
+            // We don't allow route MTUs so only install routes with a via
+            continue
         }

-        // Ensure any routes we actually want are installed
-        err = t.addRoutes(true)
-        if err != nil {
-            // Catch any stray logs
-            util.LogWithContextIfNeeded("Failed to add routes", err, t.l)
+        t.l.Debug("command: route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device)
+        if err = exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device).Run(); err != nil {
+            return fmt.Errorf("failed to run 'route add' for unsafe_route %s: %s", r.Cidr.String(), err)
         }
     }

     return nil
 }

-func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
-    r, _ := t.routeTree.Load().Lookup(ip)
+func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
+    _, r := t.routeTree.MostSpecificContains(ip)
     return r
 }

-func (t *tun) Cidr() netip.Prefix {
+func (t *tun) Cidr() *net.IPNet {
     return t.cidr
 }

@@ -249,46 +208,6 @@ func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
     return nil, fmt.Errorf("TODO: multiqueue not implemented for freebsd")
 }

-func (t *tun) addRoutes(logErrors bool) error {
-    routes := *t.Routes.Load()
-    for _, r := range routes {
-        if !r.Via.IsValid() || !r.Install {
-            // We don't allow route MTUs so only install routes with a via
-            continue
-        }
-
-        cmd := exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device)
-        t.l.Debug("command: ", cmd.String())
-        if err := cmd.Run(); err != nil {
-            retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]interface{}{"route": r}, err)
-            if logErrors {
-                retErr.Log(t.l)
-            } else {
-                return retErr
-            }
-        }
-    }
-
-    return nil
-}
-
-func (t *tun) removeRoutes(routes []Route) error {
-    for _, r := range routes {
-        if !r.Install {
-            continue
-        }
-
-        cmd := exec.Command("/sbin/route", "-n", "delete", "-net", r.Cidr.String(), "-interface", t.Device)
-        t.l.Debug("command: ", cmd.String())
-        if err := cmd.Run(); err != nil {
-            t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
-        } else {
-            t.l.WithField("route", r).Info("Removed route")
-        }
-    }
-    return nil
-}
-
 func (t *tun) deviceBytes() (o [16]byte) {
     for i, c := range t.Device {
         o[i] = byte(c)

Some files were not shown because too many files have changed in this diff.
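In the FreeBSD Activate hunk above, one side builds each exec.Command once and logs cmd.String(), while the other formats the debug arguments by hand and constructs the command inline. Below is a minimal sketch of the build-then-log pattern; the subnet and device name are invented, and Run() is left commented out because it would really invoke /sbin/route and needs root.

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        // Build the command once, then log exactly what would be executed.
        cmd := exec.Command("/sbin/route", "-n", "add", "-net", "192.168.96.0/24", "-interface", "tun0")
        fmt.Println("command:", cmd.String())
        // cmd.Run() // would actually install the route on FreeBSD
    }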