Compare commits

..

8 Commits

Author SHA1 Message Date
Dave Russell
db11e2f1af Revert "smoke test"
This reverts commit fa034a6d83.
2020-10-03 00:09:18 +10:00
Dave Russell
2ee428b067 Hook send should use a code path that actually firewalls
This change ensures that outbound hook traffic is actually checked by the
firewall and added to the conntrack table if allowed.
2020-10-02 23:42:20 +10:00
Dave Russell
e9657d571e control->Send: Also set the src port
With the source port also set, we only need to enable inbound
firewall rules on the 'server' side of the connection, as
the conntrack will allow replies.
2020-10-02 22:25:31 +10:00
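For illustration only (these are not Nebula's actual types, nor the code added by the two commits above), the firewall/conntrack behaviour they describe works roughly like this: an outbound packet with both source and destination ports set is recorded in a connection-tracking table, and the reply in the opposite direction then matches that entry without needing a dedicated inbound rule on the sending host.

```
package main

import "fmt"

// flow is an illustrative 4-tuple; field names are made up for this sketch.
type flow struct {
	srcIP, dstIP     string
	srcPort, dstPort uint16
}

// reverse returns the tuple a reply to this flow would carry.
func (f flow) reverse() flow {
	return flow{srcIP: f.dstIP, dstIP: f.srcIP, srcPort: f.dstPort, dstPort: f.srcPort}
}

// conntrack remembers flows that were allowed outbound.
type conntrack map[flow]bool

func (c conntrack) recordOutbound(f flow) { c[f] = true }

// replyAllowed reports whether an inbound packet is the reply to a tracked flow.
func (c conntrack) replyAllowed(f flow) bool { return c[f.reverse()] }

func main() {
	ct := conntrack{}

	// The hook send now sets the source port as well as the destination port,
	// so the full tuple lands in the sender's conntrack.
	out := flow{srcIP: "192.168.100.2", srcPort: 4242, dstIP: "192.168.100.3", dstPort: 4242}
	ct.recordOutbound(out)

	// The 'server' side's reply comes back with the tuple reversed and is
	// allowed without any extra inbound rule on the sending host.
	reply := flow{srcIP: "192.168.100.3", srcPort: 4242, dstIP: "192.168.100.2", dstPort: 4242}
	fmt.Println(ct.replyAllowed(reply)) // true
}
```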
Dave Russell
3cebf38504 The custom message packet sender needs a dest port
Source/Dest ports are required for the nebula firewall on the
receiving side; allow the port to be configured so that it can
be matched against specific rules as required.
2020-10-02 20:46:08 +10:00
Dave Russell
ae3ee42469 Provide hooks for custom message packet handlers
This commit augments the Control API by providing new methods to
inject message packets destined for peer nodes, and to intercept
message packets of a custom message subtype that are received from
peer nodes.
2020-09-28 22:31:19 +10:00
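The new Control methods themselves are not visible in this compare view. Purely as a hedged sketch (the names, signatures, and types below are hypothetical, not the API this commit adds), a custom-subtype hook might be wired up along these lines:

```
package main

import (
	"fmt"
	"net"
)

// handler is an illustrative callback type for received custom-subtype
// packets; the real Control API in this branch may differ.
type handler func(from net.IP, payload []byte)

// control stands in for the augmented Control object.
type control struct {
	hooks map[byte]handler // keyed by message subtype
}

// RegisterMessageHook intercepts inbound packets of the given custom subtype.
func (c *control) RegisterMessageHook(subtype byte, h handler) {
	c.hooks[subtype] = h
}

// SendMessageToPeer injects an outbound message packet destined for a peer;
// the port argument exists so the receiving firewall can match it to a rule.
func (c *control) SendMessageToPeer(subtype byte, to net.IP, port uint16, payload []byte) {
	fmt.Printf("send subtype=%d to=%s:%d bytes=%d\n", subtype, to, port, len(payload))
}

func main() {
	c := &control{hooks: map[byte]handler{}}
	c.RegisterMessageHook(7, func(from net.IP, payload []byte) {
		fmt.Printf("got %q from %s\n", payload, from)
	})
	c.SendMessageToPeer(7, net.ParseIP("192.168.100.3"), 4242, []byte("hello"))

	// Simulate delivery of an inbound custom-subtype packet to the registered hook.
	if h, ok := c.hooks[7]; ok {
		h(net.ParseIP("192.168.100.2"), []byte("hello back"))
	}
}
```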
Dave Russell
fa034a6d83 smoke test 2020-09-27 22:43:24 +10:00
Dave Russell
55d72ac46f Tighten up the inside handlers with a bit of DRY 2020-09-27 22:37:20 +10:00
Dave Russell
2c931d5691 Move inside packet handlers into map
This commit moves the inside packet handlers out of the large switch
statement and into a map of functions. The functions are keyed by packet
protocol version, type and subtype, which makes it simpler to inject
either a new protocol version and/or custom handlers.
2020-09-27 22:04:14 +10:00
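As a rough sketch of the dispatch pattern this commit describes (illustrative names only, not the actual handler signatures in this branch), mapping handlers by version, type and subtype replaces the switch like so:

```
package main

import "fmt"

// key mirrors the idea of indexing handlers by protocol version, message
// type and subtype; the field names are illustrative, not Nebula's.
type key struct {
	version, msgType, subType uint8
}

type packetHandler func(payload []byte)

func main() {
	handlers := map[key]packetHandler{
		{1, 1, 0}: func(p []byte) { fmt.Println("built-in handler, payload bytes:", len(p)) },
	}

	// A custom handler can be injected without touching a switch statement.
	handlers[key{1, 1, 9}] = func(p []byte) { fmt.Println("custom subtype 9, payload bytes:", len(p)) }

	// Dispatch replaces the large switch: look up by (version, type, subtype).
	dispatch := func(k key, payload []byte) {
		if h, ok := handlers[k]; ok {
			h(payload)
			return
		}
		fmt.Println("dropping packet with unknown header", k)
	}

	dispatch(key{1, 1, 9}, []byte("hook payload"))
	dispatch(key{2, 0, 0}, nil)
}
```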
189 changed files with 6519 additions and 21672 deletions

View File

@@ -1,57 +0,0 @@
name: "\U0001F41B Bug Report"
description: Report an issue or possible bug
title: "\U0001F41B BUG:"
labels: []
assignees: []
body:
- type: markdown
attributes:
value: |
### Thank you for taking the time to file a bug report!
Please fill out this form as completely as possible.
- type: input
id: version
attributes:
label: What version of `nebula` are you using?
placeholder: 0.0.0
validations:
required: true
- type: input
id: os
attributes:
label: What operating system are you using?
description: iOS and Android specific issues belong in the [mobile_nebula](https://github.com/DefinedNet/mobile_nebula) repo.
placeholder: Linux, Mac, Windows
validations:
required: true
- type: textarea
id: description
attributes:
label: Describe the Bug
description: A clear and concise description of what the bug is.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs from affected hosts
description: |
Provide logs from all affected hosts during the time of the issue.
Improve formatting by using <code>```</code> at the beginning and end of each log block.
validations:
required: false
- type: textarea
id: configs
attributes:
label: Config files from affected hosts
description: |
Provide config files for all affected hosts.
Improve formatting by using <code>```</code> at the beginning and end of each config file.
validations:
required: false

View File

@@ -1,13 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: 📘 Documentation
url: https://nebula.defined.net/docs/
about: Review documentation.
- name: 💁 Support/Chat
url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'
- name: 📱 Mobile Nebula
url: https://github.com/definednet/mobile_nebula
about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'

View File

@@ -14,30 +14,31 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v2 uses: actions/checkout@v1
- uses: actions/cache@v2 - uses: actions/cache@v1
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-gofmt1.20-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-gofmt-${{ hashFiles('**/go.sum') }}
restore-keys: | restore-keys: |
${{ runner.os }}-gofmt1.20- ${{ runner.os }}-gofmt-
- name: Install goimports - name: Install goimports
run: | run: |
go install golang.org/x/tools/cmd/goimports@latest go get golang.org/x/tools/cmd/goimports
go build golang.org/x/tools/cmd/goimports
- name: gofmt - name: gofmt
run: | run: |
if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -l)" ] if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -l)" ]
then then
find . -iname '*.go' | grep -v '\.pb\.go$' | xargs goimports -d find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -d
exit 1 exit 1
fi fi

View File

@@ -10,10 +10,10 @@ jobs:
name: Build Linux All name: Build Linux All
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
- name: Checkout code - name: Checkout code
uses: actions/checkout@v2 uses: actions/checkout@v2
@@ -25,19 +25,19 @@ jobs:
mv build/*.tar.gz release mv build/*.tar.gz release
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v1
with: with:
name: linux-latest name: linux-latest
path: release path: release
build-windows: build-windows:
name: Build Windows name: Build Windows amd64
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
- name: Checkout code - name: Checkout code
uses: actions/checkout@v2 uses: actions/checkout@v2
@@ -45,88 +45,63 @@ jobs:
- name: Build - name: Build
run: | run: |
echo $Env:GITHUB_REF.Substring(11) echo $Env:GITHUB_REF.Substring(11)
mkdir build\windows-amd64 go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\nebula.exe ./cmd/nebula-service
$Env:GOARCH = "amd64" go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\nebula-cert.exe ./cmd/nebula-cert
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-amd64\nebula.exe ./cmd/nebula-service
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-amd64\nebula-cert.exe ./cmd/nebula-cert
mkdir build\windows-arm64
$Env:GOARCH = "arm64"
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-arm64\nebula.exe ./cmd/nebula-service
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-arm64\nebula-cert.exe ./cmd/nebula-cert
mkdir build\dist\windows
mv dist\windows\wintun build\dist\windows\
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v1
with: with:
name: windows-latest name: windows-latest
path: build path: build
build-darwin: build-darwin:
name: Build Universal Darwin name: Build Darwin amd64
env: runs-on: macOS-latest
HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
runs-on: macos-11
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
- name: Checkout code - name: Checkout code
uses: actions/checkout@v2 uses: actions/checkout@v2
- name: Import certificates - name: Build
if: env.HAS_SIGNING_CREDS == 'true'
uses: Apple-Actions/import-codesign-certs@v1
with:
p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
- name: Build, sign, and notarize
env:
AC_USERNAME: ${{ secrets.AC_USERNAME }}
AC_PASSWORD: ${{ secrets.AC_PASSWORD }}
run: | run: |
rm -rf release make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/nebula-darwin-amd64.tar.gz
mkdir release mkdir release
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/darwin-amd64/nebula build/darwin-amd64/nebula-cert mv build/*.tar.gz release
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/darwin-arm64/nebula build/darwin-arm64/nebula-cert
lipo -create -output ./release/nebula ./build/darwin-amd64/nebula ./build/darwin-arm64/nebula
lipo -create -output ./release/nebula-cert ./build/darwin-amd64/nebula-cert ./build/darwin-arm64/nebula-cert
if [ -n "$AC_USERNAME" ]; then
codesign -s "10BC1FDDEB6CE753550156C0669109FAC49E4D1E" -f -v --timestamp --options=runtime -i "net.defined.nebula" ./release/nebula
codesign -s "10BC1FDDEB6CE753550156C0669109FAC49E4D1E" -f -v --timestamp --options=runtime -i "net.defined.nebula-cert" ./release/nebula-cert
fi
zip -j release/nebula-darwin.zip release/nebula-cert release/nebula
if [ -n "$AC_USERNAME" ]; then
xcrun notarytool submit ./release/nebula-darwin.zip --team-id "576H3XS7FP" --apple-id "$AC_USERNAME" --password "$AC_PASSWORD" --wait
fi
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v1
with: with:
name: darwin-latest name: darwin-latest
path: ./release/* path: release
release: release:
name: Create and Upload Release name: Create and Upload Release
needs: [build-linux, build-darwin, build-windows] needs: [build-linux, build-darwin, build-windows]
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download artifacts - name: Download Linux artifacts
uses: actions/download-artifact@v2 uses: actions/download-artifact@v1
with:
name: linux-latest
- name: Download Darwin artifacts
uses: actions/download-artifact@v1
with:
name: darwin-latest
- name: Download Windows artifacts
uses: actions/download-artifact@v1
with:
name: windows-latest
- name: Zip Windows - name: Zip Windows
run: | run: |
cd windows-latest cd windows-latest
cp windows-amd64/* . zip nebula-windows-amd64.zip nebula.exe nebula-cert.exe
zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
cp windows-arm64/* .
zip -r nebula-windows-arm64.zip nebula.exe nebula-cert.exe dist
- name: Create sha256sum - name: Create sha256sum
run: | run: |
@@ -136,17 +111,9 @@ jobs:
cd $dir cd $dir
if [ "$dir" = windows-latest ] if [ "$dir" = windows-latest ]
then then
sha256sum <windows-amd64/nebula.exe | sed 's=-$=nebula-windows-amd64.zip/nebula.exe=' sha256sum <nebula.exe | sed 's=-$=nebula-windows-amd64.zip/nebula.exe='
sha256sum <windows-amd64/nebula-cert.exe | sed 's=-$=nebula-windows-amd64.zip/nebula-cert.exe=' sha256sum <nebula-cert.exe | sed 's=-$=nebula-windows-amd64.zip/nebula-cert.exe='
sha256sum <windows-arm64/nebula.exe | sed 's=-$=nebula-windows-arm64.zip/nebula.exe='
sha256sum <windows-arm64/nebula-cert.exe | sed 's=-$=nebula-windows-arm64.zip/nebula-cert.exe='
sha256sum nebula-windows-amd64.zip sha256sum nebula-windows-amd64.zip
sha256sum nebula-windows-arm64.zip
elif [ "$dir" = darwin-latest ]
then
sha256sum <nebula-darwin.zip | sed 's=-$=nebula-darwin.zip='
sha256sum <nebula | sed 's=-$=nebula-darwin.zip/nebula='
sha256sum <nebula-cert | sed 's=-$=nebula-darwin.zip/nebula-cert='
else else
for v in *.tar.gz for v in *.tar.gz
do do
@@ -182,15 +149,15 @@ jobs:
asset_name: SHASUM256.txt asset_name: SHASUM256.txt
asset_content_type: text/plain asset_content_type: text/plain
- name: Upload darwin zip - name: Upload darwin-amd64
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with: with:
upload_url: ${{ steps.create_release.outputs.upload_url }} upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./darwin-latest/nebula-darwin.zip asset_path: ./darwin-latest/nebula-darwin-amd64.tar.gz
asset_name: nebula-darwin.zip asset_name: nebula-darwin-amd64.tar.gz
asset_content_type: application/zip asset_content_type: application/gzip
- name: Upload windows-amd64 - name: Upload windows-amd64
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
@@ -202,16 +169,6 @@ jobs:
asset_name: nebula-windows-amd64.zip asset_name: nebula-windows-amd64.zip
asset_content_type: application/zip asset_content_type: application/zip
- name: Upload windows-arm64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./windows-latest/nebula-windows-arm64.zip
asset_name: nebula-windows-arm64.zip
asset_content_type: application/zip
- name: Upload linux-amd64 - name: Upload linux-amd64
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
env: env:
@@ -332,16 +289,6 @@ jobs:
asset_name: nebula-linux-mips-softfloat.tar.gz asset_name: nebula-linux-mips-softfloat.tar.gz
asset_content_type: application/gzip asset_content_type: application/gzip
- name: Upload linux-riscv64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
asset_name: nebula-linux-riscv64.tar.gz
asset_content_type: application/gzip
- name: Upload freebsd-amd64 - name: Upload freebsd-amd64
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
env: env:

View File

@@ -18,24 +18,24 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v2 uses: actions/checkout@v1
- uses: actions/cache@v2 - uses: actions/cache@v1
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: | restore-keys: |
${{ runner.os }}-go1.20- ${{ runner.os }}-go-
- name: build - name: build
run: make bin-docker run: make
- name: setup docker image - name: setup docker image
working-directory: ./.github/workflows/smoke working-directory: ./.github/workflows/smoke
@@ -45,20 +45,4 @@ jobs:
working-directory: ./.github/workflows/smoke working-directory: ./.github/workflows/smoke
run: ./smoke.sh run: ./smoke.sh
- name: setup relay docker image
working-directory: ./.github/workflows/smoke
run: ./build-relay.sh
- name: run smoke relay
working-directory: ./.github/workflows/smoke
run: ./smoke-relay.sh
- name: setup docker image for P256
working-directory: ./.github/workflows/smoke
run: NAME="smoke-p256" CURVE=P256 ./build.sh
- name: run smoke-p256
working-directory: ./.github/workflows/smoke
run: NAME="smoke-p256" ./smoke.sh
timeout-minutes: 10 timeout-minutes: 10

View File

@@ -1,9 +1,5 @@
FROM ubuntu:jammy FROM debian:buster
RUN apt-get update && apt-get install -y iputils-ping ncat tcpdump ADD ./build /
ADD ./build /nebula ENTRYPOINT ["/nebula"]
WORKDIR /nebula
ENTRYPOINT ["/nebula/nebula"]

View File

@@ -1,44 +0,0 @@
#!/bin/sh
set -e -x
rm -rf ./build
mkdir ./build
(
cd build
cp ../../../../build/linux-amd64/nebula .
cp ../../../../build/linux-amd64/nebula-cert .
HOST="lighthouse1" AM_LIGHTHOUSE=true ../genconfig.sh >lighthouse1.yml <<EOF
relay:
am_relay: true
EOF
export LIGHTHOUSES="192.168.100.1 172.17.0.2:4242"
export REMOTE_ALLOW_LIST='{"172.17.0.4/32": false, "172.17.0.5/32": false}'
HOST="host2" ../genconfig.sh >host2.yml <<EOF
relay:
relays:
- 192.168.100.1
EOF
export REMOTE_ALLOW_LIST='{"172.17.0.3/32": false}'
HOST="host3" ../genconfig.sh >host3.yml
HOST="host4" ../genconfig.sh >host4.yml <<EOF
relay:
use_relays: false
EOF
../../../../nebula-cert ca -name "Smoke Test"
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
)
sudo docker build -t nebula:smoke-relay .

View File

@@ -8,8 +8,8 @@ mkdir ./build
( (
cd build cd build
cp ../../../../build/linux-amd64/nebula . cp ../../../../nebula .
cp ../../../../build/linux-amd64/nebula-cert . cp ../../../../nebula-cert .
HOST="lighthouse1" \ HOST="lighthouse1" \
AM_LIGHTHOUSE=true \ AM_LIGHTHOUSE=true \
@@ -29,11 +29,11 @@ mkdir ./build
OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \ OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
../genconfig.sh >host4.yml ../genconfig.sh >host4.yml
../../../../nebula-cert ca -curve "${CURVE:-25519}" -name "Smoke Test" ./nebula-cert ca -name "Smoke Test"
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24" ./nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24" ./nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24" ./nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24" ./nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
) )
sudo docker build -t "nebula:${NAME:-smoke}" . docker build -t nebula:smoke .

View File

@@ -33,14 +33,13 @@ lighthouse_hosts() {
cat <<EOF cat <<EOF
pki: pki:
ca: ca.crt ca: /ca.crt
cert: ${HOST}.crt cert: /${HOST}.crt
key: ${HOST}.key key: /${HOST}.key
lighthouse: lighthouse:
am_lighthouse: ${AM_LIGHTHOUSE:-false} am_lighthouse: ${AM_LIGHTHOUSE:-false}
hosts: $(lighthouse_hosts) hosts: $(lighthouse_hosts)
remote_allow_list: ${REMOTE_ALLOW_LIST}
listen: listen:
host: 0.0.0.0 host: 0.0.0.0
@@ -50,10 +49,6 @@ tun:
dev: ${TUN_DEV:-nebula1} dev: ${TUN_DEV:-nebula1}
firewall: firewall:
inbound_action: reject
outbound_action: reject
outbound: ${OUTBOUND:-$FIREWALL_ALL} outbound: ${OUTBOUND:-$FIREWALL_ALL}
inbound: ${INBOUND:-$FIREWALL_ALL} inbound: ${INBOUND:-$FIREWALL_ALL}
$(test -t 0 || cat)
EOF EOF

View File

@@ -1,85 +0,0 @@
#!/bin/bash
set -e -x
set -o pipefail
mkdir -p logs
cleanup() {
echo
echo " *** cleanup"
echo
set +e
if [ "$(jobs -r)" ]
then
sudo docker kill lighthouse1 host2 host3 host4
fi
}
trap cleanup EXIT
sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1
set +x
echo
echo " *** Testing ping from lighthouse1"
echo
set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3
sudo docker exec lighthouse1 ping -c1 192.168.100.4
set +x
echo
echo " *** Testing ping from host2"
echo
set -x
sudo docker exec host2 ping -c1 192.168.100.1
# Should fail because no relay configured in this direction
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
set +x
echo
echo " *** Testing ping from host3"
echo
set -x
sudo docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2
sudo docker exec host3 ping -c1 192.168.100.4
set +x
echo
echo " *** Testing ping from host4"
echo
set -x
sudo docker exec host4 ping -c1 192.168.100.1
# Should fail because relays not allowed
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
sudo docker exec host4 ping -c1 192.168.100.3
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]
then
echo "nebula still running after SIGTERM sent" >&2
exit 1
fi

View File

@@ -1,118 +1,55 @@
#!/bin/bash #!/bin/sh
set -e -x set -e -x
set -o pipefail docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
docker run --name host2 --rm nebula:smoke -config host2.yml -test
docker run --name host3 --rm nebula:smoke -config host3.yml -test
docker run --name host4 --rm nebula:smoke -config host4.yml -test
mkdir -p logs docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml &
cleanup() {
echo
echo " *** cleanup"
echo
set +e
if [ "$(jobs -r)" ]
then
sudo docker kill lighthouse1 host2 host3 host4
fi
}
trap cleanup EXIT
CONTAINER="nebula:${NAME:-smoke}"
sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1 sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' & docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml &
sleep 1 sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' & docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml &
sleep 1 sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' & docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host4.yml &
sleep 1 sleep 1
# grab tcpdump pcaps for debugging
sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
sudo docker exec host2 ncat -nklv 0.0.0.0 2000 &
sudo docker exec host3 ncat -nklv 0.0.0.0 2000 &
sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
set +x set +x
echo echo
echo " *** Testing ping from lighthouse1" echo " *** Testing ping from lighthouse1"
echo echo
set -x set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2 docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3 docker exec lighthouse1 ping -c1 192.168.100.3
set +x set +x
echo echo
echo " *** Testing ping from host2" echo " *** Testing ping from host2"
echo echo
set -x set -x
sudo docker exec host2 ping -c1 192.168.100.1 docker exec host2 ping -c1 192.168.100.1
# Should fail because not allowed by host3 inbound firewall # Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1 ! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
set +x
echo
echo " *** Testing ncat from host2"
echo
set -x
# Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x set +x
echo echo
echo " *** Testing ping from host3" echo " *** Testing ping from host3"
echo echo
set -x set -x
sudo docker exec host3 ping -c1 192.168.100.1 docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2 docker exec host3 ping -c1 192.168.100.2
set +x
echo
echo " *** Testing ncat from host3"
echo
set -x
sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000
sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
set +x set +x
echo echo
echo " *** Testing ping from host4" echo " *** Testing ping from host4"
echo echo
set -x set -x
sudo docker exec host4 ping -c1 192.168.100.1 docker exec host4 ping -c1 192.168.100.1
# Should fail because not allowed by host4 outbound firewall # Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1 ! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1 ! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
set +x
echo
echo " *** Testing ncat from host4"
echo
set -x
# Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x set +x
echo echo
@@ -120,19 +57,7 @@ echo " *** Testing conntrack"
echo echo
set -x set -x
# host2 can ping host3 now that host3 pinged it first # host2 can ping host3 now that host3 pinged it first
sudo docker exec host2 ping -c1 192.168.100.3 docker exec host2 ping -c1 192.168.100.3
# host4 can ping host2 once conntrack established # host4 can ping host2 once conntrack established
sudo docker exec host2 ping -c1 192.168.100.4 docker exec host2 ping -c1 192.168.100.4
sudo docker exec host4 ping -c1 192.168.100.2 docker exec host4 ping -c1 192.168.100.2
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]
then
echo "nebula still running after SIGTERM sent" >&2
exit 1
fi

View File

@@ -18,93 +18,51 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v2 uses: actions/checkout@v1
- uses: actions/cache@v2 - uses: actions/cache@v1
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: | restore-keys: |
${{ runner.os }}-go1.20- ${{ runner.os }}-go-
- name: Build - name: Build
run: make all run: make all
- name: Vet
run: make vet
- name: Test - name: Test
run: make test run: make test
- name: End 2 end
run: make e2evv
- uses: actions/upload-artifact@v3
with:
name: e2e packet flow
path: e2e/mermaid/
if-no-files-found: warn
test-linux-boringcrypto:
name: Build and test on linux with boringcrypto
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.20-
- name: Build
run: make bin-boringcrypto
- name: Test
run: make test-boringcrypto
- name: End 2 end
run: make e2evv GOEXPERIMENT=boringcrypto CGO_ENABLED=1
test: test:
name: Build and test on ${{ matrix.os }} name: Build and test on ${{ matrix.os }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [windows-latest, macos-11] os: [windows-latest, macOS-latest]
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.15
uses: actions/setup-go@v2 uses: actions/setup-go@v1
with: with:
go-version: "1.20" go-version: 1.15
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v2 uses: actions/checkout@v1
- uses: actions/cache@v2 - uses: actions/cache@v1
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: | restore-keys: |
${{ runner.os }}-go1.20- ${{ runner.os }}-go-
- name: Build nebula - name: Build nebula
run: go build ./cmd/nebula run: go build ./cmd/nebula
@@ -112,17 +70,5 @@ jobs:
- name: Build nebula-cert - name: Build nebula-cert
run: go build ./cmd/nebula-cert run: go build ./cmd/nebula-cert
- name: Vet
run: make vet
- name: Test - name: Test
run: make test run: go test -v ./...
- name: End 2 end
run: make e2evv
- uses: actions/upload-artifact@v3
with:
name: e2e packet flow
path: e2e/mermaid/
if-no-files-found: warn

9
.gitignore vendored
View File

@@ -4,14 +4,9 @@
/nebula-arm6 /nebula-arm6
/nebula-darwin /nebula-darwin
/nebula.exe /nebula.exe
/nebula-cert.exe /cert/*.crt
/cert/*.key
/coverage.out /coverage.out
/cpu.pprof /cpu.pprof
/build /build
/*.tar.gz /*.tar.gz
/e2e/mermaid/
**.crt
**.key
**.pem
!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key
!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt

View File

@@ -7,300 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] ## [Unreleased]
## [1.7.0] - 2023-05-17
### Added
- `nebula-cert ca` now supports encrypting the CA's private key with a
passphrase. Pass `-encrypt` in order to be prompted for a passphrase.
Encryption is performed using AES-256-GCM and Argon2id for KDF. KDF
parameters default to RFC recommendations, but can be overridden via CLI
flags `-argon-memory`, `-argon-parallelism`, and `-argon-iterations`. (#386)
- Support for curve P256 and BoringCrypto has been added. See README section
"Curve P256 and BoringCrypto" for more details. (#865, #861, #769, #856, #803)
- New firewall rule `local_cidr`. This could be used to filter destinations
when using `unsafe_routes`. (#507)
- Add `unsafe_route` option `install`. This controls whether the route is
installed in the systems routing table. (#831)
- Add `tun.use_system_route_table` option. Set to true to manage unsafe routes
directly on the system route table with gateway routes instead of in Nebula
configuration files. This is only supported on Linux. (#839)
- The metric `certificate.ttl_seconds` is now exposed via stats. (#782)
- Add `punchy.respond_delay` option. This allows you to change the delay
before attempting punchy.respond. Default is 5 seconds. (#721)
- Added SSH commands to allow the capture of a mutex profile. (#737)
- You can now set `lighthouse.calculated_remotes` to make it possible to do
handshakes without a lighthouse in certain configurations. (#759)
- The firewall can be configured to send REJECT replies instead of the default
DROP behavior. (#738)
- For macOS, an example launchd configuration file is now provided. (#762)
### Changed ### Changed
- Lighthouses and other `static_host_map` entries that use DNS names will now - Updated the kardianos/service go library from 1.0.0 to 1.1.0, which
be automatically refreshed to detect when the IP address changes. (#796) now creates launchd plist to write stdout/stderr to files by default.
- Lighthouses send ACK replies back to clients so that they do not fall into
connection testing as often by clients. (#851, #408)
- Allow the `listen.host` option to contain a hostname. (#825)
- When Nebula switches to a new certificate (such as via SIGHUP), we now
rehandshake with all existing tunnels. This allows firewall groups to be
updated and `pki.disconnect_invalid` to know about the new certificate
expiration time. (#838, #857, #842, #840, #835, #828, #820, #807)
### Fixed
- Always disconnect blocklisted hosts, even if `pki.disconnect_invalid` is
not set. (#858)
- Dependencies updated and go1.20 required. (#780, #824, #855, #854)
- Fix possible race condition with relays. (#827)
- FreeBSD: Fix connection to the localhost's own Nebula IP. (#808)
- Normalize and document some common log field values. (#837, #811)
- Fix crash if you set unlucky values for the firewall timeout configuration
options. (#802)
- Make DNS queries case insensitive. (#793)
- Update example systemd configurations to want `nss-lookup`. (#791)
- Errors with SSH commands now go to the SSH tunnel instead of stderr. (#757)
- Fix a hang when shutting down Android. (#772)
## [1.6.1] - 2022-09-26
### Fixed
- Refuse to process underlay packets received from overlay IPs. This prevents
confusion on hosts that have unsafe routes configured. (#741)
- The ssh `reload` command did not work on Windows, since it relied on sending
a SIGHUP signal internally. This has been fixed. (#725)
- A regression in v1.5.2 that broke unsafe routes on Mobile clients has been
fixed. (#729)
## [1.6.0] - 2022-06-30
### Added
- Experimental: nebula clients can be configured to act as relays for other nebula clients.
Primarily useful when stubborn NATs make a direct tunnel impossible. (#678)
- Configuration option to report manually specified `ip:port`s to lighthouses. (#650)
- Windows arm64 build. (#638)
- `punchy` and most `lighthouse` config options now support hot reloading. (#649)
### Changed
- Build against go 1.18. (#656)
- Promoted `routines` config from experimental to supported feature. (#702)
- Dependencies updated. (#664)
### Fixed
- Packets destined for the same host that sent it will be returned on MacOS.
This matches the default behavior of other operating systems. (#501)
- `unsafe_route` configuration will no longer crash on Windows. (#648)
- A few panics that were introduced in 1.5.x. (#657, #658, #675)
### Security
- You can set `listen.send_recv_error` to control the conditions in which
`recv_error` messages are sent. Sending these messages can expose the fact
that Nebula is running on a host, but it speeds up re-handshaking. (#670)
### Removed
- `x509` config stanza support has been removed. (#685)
## [1.5.2] - 2021-12-14
### Added
- Warn when a non lighthouse node does not have lighthouse hosts configured. (#587)
### Changed
- No longer fatals if expired CA certificates are present in `pki.ca`, as long as 1 valid CA is present. (#599)
- `nebula-cert` will now enforce ipv4 addresses. (#604)
- Warn on macOS if an unsafe route cannot be created due to a collision with an
existing route. (#610)
- Warn if you set a route MTU on platforms where we don't support it. (#611)
### Fixed
- Rare race condition when tearing down a tunnel due to `recv_error` and sending packets on another thread. (#590)
- Bug in `routes` and `unsafe_routes` handling that was introduced in 1.5.0. (#595)
- `-test` mode no longer results in a crash. (#602)
### Removed
- `x509.ca` config alias for `pki.ca`. (#604)
### Security
- Upgraded `golang.org/x/crypto` to address an issue which allowed unauthenticated clients to cause a panic in SSH
servers. (#603)
## 1.5.1 - 2021-12-13
(This release was skipped due to discovering #610 and #611 after the tag was
created.)
## [1.5.0] - 2021-11-11
### Added
- SSH `print-cert` has a new `-raw` flag to get the PEM representation of a certificate. (#483)
- New build architecture: Linux `riscv64`. (#542)
- New experimental config option `remote_allow_ranges`. (#540)
- New config option `pki.disconnect_invalid` that will tear down tunnels when they become invalid (through expiry or
removal of root trust). Default is `false`. Note, this will not currently recognize if a remote has changed
certificates since the last handshake. (#370)
- New config option `unsafe_routes.<route>.metric` will set a metric for a specific unsafe route. It's useful if you have
more than one identical route and want to prefer one against the other. (#353)
### Changed
- Build against go 1.17. (#553)
- Build with `CGO_ENABLED=0` set, to create more portable binaries. This could
have an effect on DNS resolution if you rely on anything non-standard. (#421)
- Windows now uses the [wintun](https://www.wintun.net/) driver which does not require installation. This driver
is a large improvement over the TAP driver that was used in previous versions. If you had a previous version
of `nebula` running, you will want to disable the tap driver in Control Panel, or uninstall the `tap0901` driver
before running this version. (#289)
- Darwin binaries are now universal (works on both amd64 and arm64), signed, and shipped in a notarized zip file.
`nebula-darwin.zip` will be the only darwin release artifact. (#571)
- Darwin uses syscalls and AF_ROUTE to configure the routing table, instead of
using `/sbin/route`. Setting `tun.dev` is now allowed on Darwin as well, it
must be in the format `utun[0-9]+` or it will be ignored. (#163)
### Deprecated
- The `preferred_ranges` option has been supported as a replacement for
`local_range` since v1.0.0. It has now been documented and `local_range`
has been officially deprecated. (#541)
### Fixed
- Valid recv_error packets were incorrectly marked as "spoofing" and ignored. (#482)
- SSH server handles single `exec` requests correctly. (#483)
- Signing a certificate with `nebula-cert sign` now verifies that the supplied
ca-key matches the ca-crt. (#503)
- If `preferred_ranges` (or the deprecated `local_range`) is configured, we
will immediately switch to a preferred remote address after the reception of
a handshake packet (instead of waiting until 1,000 packets have been sent).
(#532)
- A race condition when `punchy.respond` is enabled and ensures the correct
vpn ip is sent a punch back response in highly queried node. (#566)
- Fix a rare crash during handshake due to a race condition. (#535)
## [1.4.0] - 2021-05-11
### Added
- Ability to output qr code images in `print`, `ca`, and `sign` modes for `nebula-cert`.
This is useful when configuring mobile clients. (#297)
- Experimental: Nebula can now do work on more than 2 cpu cores in send and receive paths via
the new `routines` config option. (#382, #391, #395)
- ICMP ping requests can be responded to when the `tun.disabled` is `true`.
This is useful so that you can "ping" a lighthouse running in this mode. (#342)
- Run smoke tests via `make smoke-docker`. (#287)
- More reported stats, udp memory use on linux, build version (when using Prometheus), firewall,
handshake, and cached packet stats. (#390, #405, #450, #453)
- IPv6 support for the underlay network. (#369)
- End to end testing, run with `make e2e`. (#425, #427, #428)
### Changed
- Darwin will now log stdout/stderr to a file when using `-service` mode. (#303)
- Example systemd unit file now better arranged startup order when using `sshd`
and other fixes. (#317, #412, #438)
- Reduced memory utilization/garbage collection. (#320, #323, #340)
- Reduced CPU utilization. (#329)
- Build against go 1.16. (#381)
- Refactored handshakes to improve performance and correctness. (#401, #402, #404, #416, #451)
- Improved roaming support for mobile clients. (#394, #457)
- Lighthouse performance and correctness improvements. (#406, #418, #429, #433, #437, #442, #449)
- Better ordered startup to enable `sshd`, `stats`, and `dns` subsystems to listen on
the nebula interface. (#375)
### Fixed
- No longer report handshake packets as `lost` in stats. (#331)
- Error handling in the `cert` package. (#339, #373)
- Orphaned pending hostmap entries are cleaned up. (#344)
- Most known data races are now resolved. (#396, #400, #424)
- Refuse to run a lighthouse on an ephemeral port. (#399)
- Removed the global references. (#423, #426, #446)
- Reloading via ssh command avoids a panic. (#447)
- Shutdown is now performed in a cleaner way. (#448)
- Logs will now find their way to Windows event viewer when running under `-service` mode
in Windows. (#443)
## [1.3.0] - 2020-09-22 ## [1.3.0] - 2020-09-22
@@ -475,13 +185,7 @@ created.)
- Initial public release. - Initial public release.
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.0...HEAD [Unreleased]: https://github.com/slackhq/nebula/compare/v1.3.0...HEAD
[1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
[1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
[1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0
[1.5.2]: https://github.com/slackhq/nebula/releases/tag/v1.5.2
[1.5.0]: https://github.com/slackhq/nebula/releases/tag/v1.5.0
[1.4.0]: https://github.com/slackhq/nebula/releases/tag/v1.4.0
[1.3.0]: https://github.com/slackhq/nebula/releases/tag/v1.3.0 [1.3.0]: https://github.com/slackhq/nebula/releases/tag/v1.3.0
[1.2.0]: https://github.com/slackhq/nebula/releases/tag/v1.2.0 [1.2.0]: https://github.com/slackhq/nebula/releases/tag/v1.2.0
[1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0 [1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0

View File

@@ -1,38 +0,0 @@
### Logging conventions
A log message (the string/format passed to `Info`, `Error`, `Debug` etc, as well as their `Sprintf` counterparts) should
be a descriptive message about the event and may contain specific identifying characteristics. Regardless of the
level of detail in the message, identifying characteristics should always be included via `WithField`, `WithFields` or
`WithError`.
If an error is being logged use `l.WithError(err)` so that there is better discoverability about the event as well
as the specific error condition.
#### Common fields
- `cert` - a `cert.NebulaCertificate` object, do not `.String()` this manually, `logrus` will marshal objects properly
for the formatter it is using.
- `fingerprint` - a single `NebulaCertificate` hex encoded fingerprint
- `fingerprints` - an array of `NebulaCertificate` hex encoded fingerprints
- `fwPacket` - a FirewallPacket object
- `handshake` - an object containing:
- `stage` - the current stage counter
- `style` - noise handshake style `ix_psk0`, `xx`, etc
- `header` - a nebula header object
- `udpAddr` - a `net.UDPAddr` object
- `udpIp` - a udp ip address
- `vpnIp` - vpn ip of the host (remote or local)
- `relay` - the vpnIp of the relay host that is or should be handling the relay packet
- `relayFrom` - The vpnIp of the initial sender of the relayed packet
- `relayTo` - The vpnIp of the final destination of a relayed packet
#### Example:
```
l.WithError(err).
WithField("vpnIp", IntIp(hostinfo.hostId)).
WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix"}).
WithField("cert", remoteCert).
Info("Invalid certificate from host")
```

108
Makefile
View File

@@ -1,32 +1,7 @@
GOMINVERSION = 1.20
NEBULA_CMD_PATH = "./cmd/nebula" NEBULA_CMD_PATH = "./cmd/nebula"
BUILD_NUMBER ?= dev+$(shell date -u '+%Y%m%d%H%M%S')
GO111MODULE = on GO111MODULE = on
export GO111MODULE export GO111MODULE
CGO_ENABLED = 0
export CGO_ENABLED
# Set up OS specific bits
ifeq ($(OS),Windows_NT)
#TODO: we should be able to ditch awk as well
GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
NEBULA_CMD_SUFFIX = .exe
NULL_FILE = nul
else
GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
NEBULA_CMD_SUFFIX =
NULL_FILE = /dev/null
endif
# Only defined the build number if we haven't already
ifndef BUILD_NUMBER
ifeq ($(shell git describe --exact-match 2>$(NULL_FILE)),)
BUILD_NUMBER = $(shell git describe --abbrev=0 --match "v*" | cut -dv -f2)-$(shell git branch --show-current)-$(shell git describe --long --dirty | cut -d- -f2-)
else
BUILD_NUMBER = $(shell git describe --exact-match --dirty | cut -dv -f2)
endif
endif
LDFLAGS = -X main.Build=$(BUILD_NUMBER) LDFLAGS = -X main.Build=$(BUILD_NUMBER)
@@ -41,33 +16,12 @@ ALL_LINUX = linux-amd64 \
linux-mipsle \ linux-mipsle \
linux-mips64 \ linux-mips64 \
linux-mips64le \ linux-mips64le \
linux-mips-softfloat \ linux-mips-softfloat
linux-riscv64
ALL = $(ALL_LINUX) \ ALL = $(ALL_LINUX) \
darwin-amd64 \ darwin-amd64 \
darwin-arm64 \
freebsd-amd64 \ freebsd-amd64 \
windows-amd64 \ windows-amd64
windows-arm64
e2e:
$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e
e2ev: TEST_FLAGS = -v
e2ev: e2e
e2evv: TEST_ENV += TEST_LOGS=1
e2evv: e2ev
e2evvv: TEST_ENV += TEST_LOGS=2
e2evvv: e2ev
e2evvvv: TEST_ENV += TEST_LOGS=3
e2evvvv: e2ev
e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
e2e-bench: e2e
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert) all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
@@ -77,32 +31,22 @@ release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
release-freebsd: build/nebula-freebsd-amd64.tar.gz release-freebsd: build/nebula-freebsd-amd64.tar.gz
release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
BUILD_ARGS = -trimpath
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
mv $? . mv $? .
bin-windows-arm64: build/windows-arm64/nebula.exe build/windows-arm64/nebula-cert.exe
mv $? .
bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
mv $? . mv $? .
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
mv $? . mv $? .
bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
mv $? .
bin: bin:
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH} go build -trimpath -ldflags "$(LDFLAGS)" -o ./nebula ${NEBULA_CMD_PATH}
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert go build -trimpath -ldflags "$(LDFLAGS)" -o ./nebula-cert ./cmd/nebula-cert
install: install:
go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH} go install -trimpath -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert go install -trimpath -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*)) build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*))
build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*)) build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
@@ -110,19 +54,15 @@ build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
# Build an extra small binary for mips-softfloat # Build an extra small binary for mips-softfloat
build/linux-mips-softfloat/%: LDFLAGS += -s -w build/linux-mips-softfloat/%: LDFLAGS += -s -w
# boringcrypto
build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
build/%/nebula: .FORCE build/%/nebula: .FORCE
GOOS=$(firstword $(subst -, , $*)) \ GOOS=$(firstword $(subst -, , $*)) \
GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \ GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH} go build -trimpath -o $@ -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
build/%/nebula-cert: .FORCE build/%/nebula-cert: .FORCE
GOOS=$(firstword $(subst -, , $*)) \ GOOS=$(firstword $(subst -, , $*)) \
GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \ GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ./cmd/nebula-cert go build -trimpath -o $@ -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
build/%/nebula.exe: build/%/nebula build/%/nebula.exe: build/%/nebula
mv $< $@ mv $< $@
@@ -142,9 +82,6 @@ vet:
test: test:
go test -v ./... go test -v ./...
test-boringcrypto:
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./...
test-cov-html: test-cov-html:
go test -coverprofile=coverage.out go test -coverprofile=coverage.out
go tool cover -html=coverage.out go tool cover -html=coverage.out
@@ -163,35 +100,20 @@ bench-cpu-long:
proto: nebula.pb.go cert/cert.pb.go proto: nebula.pb.go cert/cert.pb.go
nebula.pb.go: nebula.proto .FORCE nebula.pb.go: nebula.proto .FORCE
go build github.com/gogo/protobuf/protoc-gen-gogofaster go build github.com/golang/protobuf/protoc-gen-go
PATH="$(CURDIR):$(PATH)" protoc --gogofaster_out=paths=source_relative:. $< PATH="$(PWD):$(PATH)" protoc --go_out=. $<
rm protoc-gen-gogofaster rm protoc-gen-go
cert/cert.pb.go: cert/cert.proto .FORCE cert/cert.pb.go: cert/cert.proto .FORCE
$(MAKE) -C cert cert.pb.go $(MAKE) -C cert cert.pb.go
service: service:
@echo > $(NULL_FILE) @echo > /dev/null
$(eval NEBULA_CMD_PATH := "./cmd/nebula-service") $(eval NEBULA_CMD_PATH := "./cmd/nebula-service")
ifeq ($(words $(MAKECMDGOALS)),1) ifeq ($(words $(MAKECMDGOALS)),1)
@$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory $(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
endif endif
bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
smoke-docker: bin-docker
cd .github/workflows/smoke/ && ./build.sh
cd .github/workflows/smoke/ && ./smoke.sh
cd .github/workflows/smoke/ && NAME="smoke-p256" CURVE="P256" ./build.sh
cd .github/workflows/smoke/ && NAME="smoke-p256" ./smoke.sh
smoke-relay-docker: bin-docker
cd .github/workflows/smoke/ && ./build-relay.sh
cd .github/workflows/smoke/ && ./smoke-relay.sh
smoke-docker-race: BUILD_ARGS = -race
smoke-docker-race: smoke-docker
.FORCE: .FORCE:
.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race .PHONY: test test-cov-html bench bench-cpu bench-cpu-long bin proto release service
.DEFAULT_GOAL := bin .DEFAULT_GOAL := bin

View File

@@ -1,6 +1,7 @@
## What is Nebula? ## What is Nebula?
Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security.
It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, Windows, iOS, and Android. It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, and Windows.
(Also: keep this quiet, but we have an early prototype running on iOS).
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers. It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.
Nebula incorporates a number of existing concepts like encryption, security groups, certificates, Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
@@ -8,43 +9,9 @@ and tunneling, and each of those individual pieces existed before Nebula in vari
What makes Nebula different to existing offerings is that it brings all of these ideas together, What makes Nebula different to existing offerings is that it brings all of these ideas together,
resulting in a sum that is greater than its individual parts. resulting in a sum that is greater than its individual parts.
Further documentation can be found [here](https://nebula.defined.net/docs/).
You can read more about Nebula [here](https://medium.com/p/884110a5579). You can read more about Nebula [here](https://medium.com/p/884110a5579).
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU). You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU)
## Supported Platforms
#### Desktop and Server
Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for downloads or see the [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) section.
- Linux - 64 and 32 bit, arm, and others
- Windows
- MacOS
- Freebsd
#### Distribution Packages
- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
```
$ sudo pacman -S nebula
```
- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
```
$ sudo dnf install nebula
```
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
```
$ brew install nebula
```
#### Mobile
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&amp;itscg=30200)
- [Android](https://play.google.com/store/apps/details?id=net.defined.mobile_nebula&pcampaignid=pcampaignidMKT-Other-global-all-co-prtnr-py-PartBadge-Mar2515-1)
## Technical Overview ## Technical Overview
@@ -54,15 +21,15 @@ Nebula's user-defined groups allow for provider agnostic traffic filtering betwe
Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs. Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme. Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme.
Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration. Nebula uses elliptic curve Diffie-Hellman key exchange, and AES-256-GCM in its default configuration.
Nebula was created to provide a mechanism for groups of hosts to communicate securely, even across the internet, while enabling expressive firewall definitions similar in style to cloud security groups. Nebula was created to provide a mechanism for groups hosts to communicate securely, even across the internet, while enabling expressive firewall definitions similar in style to cloud security groups.
## Getting started (quickly) ## Getting started (quickly)
To set up a Nebula network, you'll need: To set up a Nebula network, you'll need:
#### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) or [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use. #### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use.
#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse. #### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.
@@ -97,13 +64,13 @@ Download a copy of the nebula [example configuration](https://github.com/slackhq
#### 6. Copy nebula credentials, configuration, and binaries to each host #### 6. Copy nebula credentials, configuration, and binaries to each host
For each host, copy the nebula binary to the host, along with `config.yml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4. For each host, copy the nebula binary to the host, along with `config.yaml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.** **DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
#### 7. Run nebula on each host #### 7. Run nebula on each host
``` ```
./nebula -config /path/to/config.yml ./nebula -config /path/to/config.yaml
``` ```
## Building Nebula from source ## Building Nebula from source
@@ -118,17 +85,6 @@ To build nebula for a specific platform (ex, Windows):
See the [Makefile](Makefile) for more details on build targets See the [Makefile](Makefile) for more details on build targets
## Curve P256 and BoringCrypto
The default curve used for cryptographic handshakes and signatures is Curve25519. This is the recommended setting for most users. If your deployment has certain compliance requirements, you have the option of creating your CA using `nebula-cert ca -curve P256` to use NIST Curve P256. The CA will then sign certificates using ECDSA P256, and any hosts using these certificates will use P256 for ECDH handshakes.
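As a hedged illustration of that option (organization and host names below are placeholders, and the `sign` step shown is the usual follow-up rather than anything P256-specific):

```
$ ./nebula-cert ca -curve P256 -name "Myorganization, Inc"
$ ./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"
```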
In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:
    make bin-boringcrypto
    make release-boringcrypto
This is not the recommended default deployment, but may be useful based on your compliance requirements.
## Credits ## Credits
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang. Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.
@@ -1,12 +0,0 @@
Security Policy
===============
Reporting a Vulnerability
-------------------------
If you believe you have found a security vulnerability with Nebula, please let
us know right away. We will investigate all reports and do our best to quickly
fix valid issues.
You can submit your report on [HackerOne](https://hackerone.com/slack) and our
security team will respond as soon as possible.

View File

@@ -2,29 +2,12 @@ package nebula
import ( import (
"fmt" "fmt"
"net"
"regexp" "regexp"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
) )
type AllowList struct { type AllowList struct {
// The values of this cidrTree are `bool`, signifying allow/deny // The values of this cidrTree are `bool`, signifying allow/deny
cidrTree *cidr.Tree6 cidrTree *CIDRTree
}
type RemoteAllowList struct {
AllowList *AllowList
// Inside Range Specific, keys of this tree are inside CIDRs and values
// are *AllowList
insideAllowLists *cidr.Tree6
}
type LocalAllowList struct {
AllowList *AllowList
// To avoid ambiguity, all rules must be true, or all rules must be false. // To avoid ambiguity, all rules must be true, or all rules must be false.
nameRules []AllowListNameRule nameRules []AllowListNameRule
@@ -35,224 +18,7 @@ type AllowListNameRule struct {
Allow bool Allow bool
} }
func NewLocalAllowListFromConfig(c *config.C, k string) (*LocalAllowList, error) { func (al *AllowList) Allow(ip uint32) bool {
var nameRules []AllowListNameRule
handleKey := func(key string, value interface{}) (bool, error) {
if key == "interfaces" {
var err error
nameRules, err = getAllowListInterfaces(k, value)
if err != nil {
return false, err
}
return true, nil
}
return false, nil
}
al, err := newAllowListFromConfig(c, k, handleKey)
if err != nil {
return nil, err
}
return &LocalAllowList{AllowList: al, nameRules: nameRules}, nil
}
func NewRemoteAllowListFromConfig(c *config.C, k, rangesKey string) (*RemoteAllowList, error) {
al, err := newAllowListFromConfig(c, k, nil)
if err != nil {
return nil, err
}
remoteAllowRanges, err := getRemoteAllowRanges(c, rangesKey)
if err != nil {
return nil, err
}
return &RemoteAllowList{AllowList: al, insideAllowLists: remoteAllowRanges}, nil
}
// If the handleKey func returns true, the rest of the parsing is skipped
// for this key. This allows parsing of special values like `interfaces`.
func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
r := c.Get(k)
if r == nil {
return nil, nil
}
return newAllowList(k, r, handleKey)
}
// If the handleKey func returns true, the rest of the parsing is skipped
// for this key. This allows parsing of special values like `interfaces`.
func newAllowList(k string, raw interface{}, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
rawMap, ok := raw.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
}
tree := cidr.NewTree6()
// Keep track of the rules we have added for both ipv4 and ipv6
type allowListRules struct {
firstValue bool
allValuesMatch bool
defaultSet bool
allValues bool
}
rules4 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
rules6 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
for rawKey, rawValue := range rawMap {
rawCIDR, ok := rawKey.(string)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}
if handleKey != nil {
handled, err := handleKey(rawCIDR, rawValue)
if err != nil {
return nil, err
}
if handled {
continue
}
}
value, ok := rawValue.(bool)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
}
_, ipNet, err := net.ParseCIDR(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}
// TODO: should we error on duplicate CIDRs in the config?
tree.AddCIDR(ipNet, value)
maskBits, maskSize := ipNet.Mask.Size()
var rules *allowListRules
if maskSize == 32 {
rules = &rules4
} else {
rules = &rules6
}
if rules.firstValue {
rules.allValues = value
rules.firstValue = false
} else {
if value != rules.allValues {
rules.allValuesMatch = false
}
}
// Check if this is 0.0.0.0/0 or ::/0
if maskBits == 0 {
rules.defaultSet = true
}
}
if !rules4.defaultSet {
if rules4.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
tree.AddCIDR(zeroCIDR, !rules4.allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
}
}
if !rules6.defaultSet {
if rules6.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("::/0")
tree.AddCIDR(zeroCIDR, !rules6.allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
}
}
return &AllowList{cidrTree: tree}, nil
}
func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
var nameRules []AllowListNameRule
rawRules, ok := v.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
}
firstEntry := true
var allValues bool
for rawName, rawAllow := range rawRules {
name, ok := rawName.(string)
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
}
allow, ok := rawAllow.(bool)
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
}
nameRE, err := regexp.Compile("^" + name + "$")
if err != nil {
return nil, fmt.Errorf("config `%s.interfaces` has invalid key: %s: %v", k, name, err)
}
nameRules = append(nameRules, AllowListNameRule{
Name: nameRE,
Allow: allow,
})
if firstEntry {
allValues = allow
firstEntry = false
} else {
if allow != allValues {
return nil, fmt.Errorf("config `%s.interfaces` values must all be the same true/false value", k)
}
}
}
return nameRules, nil
}
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
value := c.Get(k)
if value == nil {
return nil, nil
}
remoteAllowRanges := cidr.NewTree6()
rawMap, ok := value.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
}
for rawKey, rawValue := range rawMap {
rawCIDR, ok := rawKey.(string)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}
allowList, err := newAllowList(fmt.Sprintf("%s.%s", k, rawCIDR), rawValue, nil)
if err != nil {
return nil, err
}
_, ipNet, err := net.ParseCIDR(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}
remoteAllowRanges.AddCIDR(ipNet, allowList)
}
return remoteAllowRanges, nil
}
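A sketch of the config shape these parsers expect, assuming they are wired to the lighthouse allow-list keys from the example config (the exact key paths are an assumption). Per the rules enforced above, mixing true and false CIDR entries requires an explicit default for 0.0.0.0/0 (and ::/0), and all `interfaces` entries must share the same true/false value:

```
lighthouse:
  local_allow_list:
    # which local addresses may be advertised, keyed by CIDR
    "0.0.0.0/0": true
    "10.0.0.0/8": false
    # interface-name regex rules; all values must agree
    interfaces:
      "docker.*": false
  remote_allow_list:
    # which remote (underlay) addresses we will use for peers
    "0.0.0.0/0": true
    "172.16.0.0/12": false
```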
func (al *AllowList) Allow(ip net.IP) bool {
if al == nil { if al == nil {
return true return true
} }
@@ -266,42 +32,7 @@ func (al *AllowList) Allow(ip net.IP) bool {
} }
} }
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool { func (al *AllowList) AllowName(name string) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContainsIpV4(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *LocalAllowList) Allow(ip net.IP) bool {
if al == nil {
return true
}
return al.AllowList.Allow(ip)
}
func (al *LocalAllowList) AllowName(name string) bool {
if al == nil || len(al.nameRules) == 0 { if al == nil || len(al.nameRules) == 0 {
return true return true
} }
@@ -315,47 +46,3 @@ func (al *LocalAllowList) AllowName(name string) bool {
// If no rules match, return the default, which is the inverse of the rules // If no rules match, return the default, which is the inverse of the rules
return !al.nameRules[0].Allow return !al.nameRules[0].Allow
} }
func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool {
if al == nil {
return true
}
return al.AllowList.Allow(ip)
}
func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool {
if !al.getInsideAllowList(vpnIp).Allow(ip) {
return false
}
return al.AllowList.Allow(ip)
}
func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
return false
}
return al.AllowList.AllowIpV4(ip)
}
func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
return false
}
return al.AllowList.AllowIpV6(hi, lo)
}
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
if al.insideAllowLists != nil {
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
if inside != nil {
return inside.(*AllowList)
}
}
return nil
}
@@ -5,129 +5,31 @@ import (
"regexp" "regexp"
"testing" "testing"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestNewAllowListFromConfig(t *testing.T) {
l := test.NewLogger()
c := config.NewC(l)
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0": true,
}
r, err := newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
assert.Nil(t, r)
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0/16": "abc",
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0/16": true,
"10.0.0.0/8": false,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
"fd00::/8": true,
"fd00:fd00::/16": false,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
if assert.NoError(t, err) {
assert.NotNil(t, r)
}
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
"::/0": false,
"fd00::/8": true,
"fd00:fd00::/16": false,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
if assert.NoError(t, err) {
assert.NotNil(t, r)
}
// Test interface names
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: "foo",
},
}
lr, err := NewLocalAllowListFromConfig(c, "allowlist")
assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
`eth.*`: true,
},
}
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
},
}
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
if assert.NoError(t, err) {
assert.NotNil(t, lr)
}
}
func TestAllowList_Allow(t *testing.T) { func TestAllowList_Allow(t *testing.T) {
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1"))) assert.Equal(t, true, ((*AllowList)(nil)).Allow(ip2int(net.ParseIP("1.1.1.1"))))
tree := cidr.NewTree6() tree := NewCIDRTree()
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true) tree.AddCIDR(getCIDR("0.0.0.0/0"), true)
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false) tree.AddCIDR(getCIDR("10.0.0.0/8"), false)
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true) tree.AddCIDR(getCIDR("10.42.42.0/24"), true)
tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false)
tree.AddCIDR(cidr.Parse("::1/128"), true)
tree.AddCIDR(cidr.Parse("::2/128"), false)
al := &AllowList{cidrTree: tree} al := &AllowList{cidrTree: tree}
assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1"))) assert.Equal(t, true, al.Allow(ip2int(net.ParseIP("1.1.1.1"))))
assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4"))) assert.Equal(t, false, al.Allow(ip2int(net.ParseIP("10.0.0.4"))))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42"))) assert.Equal(t, true, al.Allow(ip2int(net.ParseIP("10.42.42.42"))))
assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1")))
assert.Equal(t, true, al.Allow(net.ParseIP("::1")))
assert.Equal(t, false, al.Allow(net.ParseIP("::2")))
} }
func TestLocalAllowList_AllowName(t *testing.T) { func TestAllowList_AllowName(t *testing.T) {
assert.Equal(t, true, ((*LocalAllowList)(nil)).AllowName("docker0")) assert.Equal(t, true, ((*AllowList)(nil)).AllowName("docker0"))
rules := []AllowListNameRule{ rules := []AllowListNameRule{
{Name: regexp.MustCompile("^docker.*$"), Allow: false}, {Name: regexp.MustCompile("^docker.*$"), Allow: false},
{Name: regexp.MustCompile("^tun.*$"), Allow: false}, {Name: regexp.MustCompile("^tun.*$"), Allow: false},
} }
al := &LocalAllowList{nameRules: rules} al := &AllowList{nameRules: rules}
assert.Equal(t, false, al.AllowName("docker0")) assert.Equal(t, false, al.AllowName("docker0"))
assert.Equal(t, false, al.AllowName("tun0")) assert.Equal(t, false, al.AllowName("tun0"))
@@ -137,7 +39,7 @@ func TestLocalAllowList_AllowName(t *testing.T) {
{Name: regexp.MustCompile("^eth.*$"), Allow: true}, {Name: regexp.MustCompile("^eth.*$"), Allow: true},
{Name: regexp.MustCompile("^ens.*$"), Allow: true}, {Name: regexp.MustCompile("^ens.*$"), Allow: true},
} }
al = &LocalAllowList{nameRules: rules} al = &AllowList{nameRules: rules}
assert.Equal(t, false, al.AllowName("docker0")) assert.Equal(t, false, al.AllowName("docker0"))
assert.Equal(t, true, al.AllowName("eth0")) assert.Equal(t, true, al.AllowName("eth0"))
@@ -26,7 +26,7 @@ func NewBits(bits uint64) *Bits {
} }
} }
func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool { func (b *Bits) Check(i uint64) bool {
// If i is the next number, return true. // If i is the next number, return true.
if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) { if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) {
return true return true
@@ -47,7 +47,7 @@ func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool {
return false return false
} }
func (b *Bits) Update(l *logrus.Logger, i uint64) bool { func (b *Bits) Update(i uint64) bool {
// If i is the next number, return true and update current. // If i is the next number, return true and update current.
if i == b.current+1 { if i == b.current+1 {
// Report missed packets, we can only understand what was missed after the first window has been gone through // Report missed packets, we can only understand what was missed after the first window has been gone through
@@ -3,12 +3,10 @@ package nebula
import ( import (
"testing" "testing"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestBits(t *testing.T) { func TestBits(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
// make sure it is the right size // make sure it is the right size
@@ -16,46 +14,46 @@ func TestBits(t *testing.T) {
// This is initialized to zero - receive one. This should work. // This is initialized to zero - receive one. This should work.
assert.True(t, b.Check(l, 1)) assert.True(t, b.Check(1))
u := b.Update(l, 1) u := b.Update(1)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 1, b.current) assert.EqualValues(t, 1, b.current)
g := []bool{false, true, false, false, false, false, false, false, false, false} g := []bool{false, true, false, false, false, false, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Receive two // Receive two
assert.True(t, b.Check(l, 2)) assert.True(t, b.Check(2))
u = b.Update(l, 2) u = b.Update(2)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 2, b.current) assert.EqualValues(t, 2, b.current)
g = []bool{false, true, true, false, false, false, false, false, false, false} g = []bool{false, true, true, false, false, false, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Receive two again - it will fail // Receive two again - it will fail
assert.False(t, b.Check(l, 2)) assert.False(t, b.Check(2))
u = b.Update(l, 2) u = b.Update(2)
assert.False(t, u) assert.False(t, u)
assert.EqualValues(t, 2, b.current) assert.EqualValues(t, 2, b.current)
// Jump ahead to 15, which should clear everything and set the 6th element // Jump ahead to 15, which should clear everything and set the 6th element
assert.True(t, b.Check(l, 15)) assert.True(t, b.Check(15))
u = b.Update(l, 15) u = b.Update(15)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 15, b.current) assert.EqualValues(t, 15, b.current)
g = []bool{false, false, false, false, false, true, false, false, false, false} g = []bool{false, false, false, false, false, true, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Mark 14, which is allowed because it is in the window // Mark 14, which is allowed because it is in the window
assert.True(t, b.Check(l, 14)) assert.True(t, b.Check(14))
u = b.Update(l, 14) u = b.Update(14)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 15, b.current) assert.EqualValues(t, 15, b.current)
g = []bool{false, false, false, false, true, true, false, false, false, false} g = []bool{false, false, false, false, true, true, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Mark 5, which is not allowed because it is not in the window // Mark 5, which is not allowed because it is not in the window
assert.False(t, b.Check(l, 5)) assert.False(t, b.Check(5))
u = b.Update(l, 5) u = b.Update(5)
assert.False(t, u) assert.False(t, u)
assert.EqualValues(t, 15, b.current) assert.EqualValues(t, 15, b.current)
g = []bool{false, false, false, false, true, true, false, false, false, false} g = []bool{false, false, false, false, true, true, false, false, false, false}
@@ -63,65 +61,63 @@ func TestBits(t *testing.T) {
// make sure we handle wrapping around once to the current position // make sure we handle wrapping around once to the current position
b = NewBits(10) b = NewBits(10)
assert.True(t, b.Update(l, 1)) assert.True(t, b.Update(1))
assert.True(t, b.Update(l, 11)) assert.True(t, b.Update(11))
assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits) assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits)
// Walk through a few windows in order // Walk through a few windows in order
b = NewBits(10) b = NewBits(10)
for i := uint64(0); i <= 100; i++ { for i := uint64(0); i <= 100; i++ {
assert.True(t, b.Check(l, i), "Error while checking %v", i) assert.True(t, b.Check(i), "Error while checking %v", i)
assert.True(t, b.Update(l, i), "Error while updating %v", i) assert.True(t, b.Update(i), "Error while updating %v", i)
} }
} }
func TestBitsDupeCounter(t *testing.T) { func TestBitsDupeCounter(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
b.lostCounter.Clear() b.lostCounter.Clear()
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
assert.True(t, b.Update(l, 1)) assert.True(t, b.Update(1))
assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count())
assert.False(t, b.Update(l, 1)) assert.False(t, b.Update(1))
assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.Equal(t, int64(1), b.dupeCounter.Count())
assert.True(t, b.Update(l, 2)) assert.True(t, b.Update(2))
assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.Equal(t, int64(1), b.dupeCounter.Count())
assert.True(t, b.Update(l, 3)) assert.True(t, b.Update(3))
assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.Equal(t, int64(1), b.dupeCounter.Count())
assert.False(t, b.Update(l, 1)) assert.False(t, b.Update(1))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
assert.Equal(t, int64(2), b.dupeCounter.Count()) assert.Equal(t, int64(2), b.dupeCounter.Count())
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
} }
func TestBitsOutOfWindowCounter(t *testing.T) { func TestBitsOutOfWindowCounter(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
b.lostCounter.Clear() b.lostCounter.Clear()
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
assert.True(t, b.Update(l, 20)) assert.True(t, b.Update(20))
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
assert.True(t, b.Update(l, 21)) assert.True(t, b.Update(21))
assert.True(t, b.Update(l, 22)) assert.True(t, b.Update(22))
assert.True(t, b.Update(l, 23)) assert.True(t, b.Update(23))
assert.True(t, b.Update(l, 24)) assert.True(t, b.Update(24))
assert.True(t, b.Update(l, 25)) assert.True(t, b.Update(25))
assert.True(t, b.Update(l, 26)) assert.True(t, b.Update(26))
assert.True(t, b.Update(l, 27)) assert.True(t, b.Update(27))
assert.True(t, b.Update(l, 28)) assert.True(t, b.Update(28))
assert.True(t, b.Update(l, 29)) assert.True(t, b.Update(29))
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
assert.False(t, b.Update(l, 0)) assert.False(t, b.Update(0))
assert.Equal(t, int64(1), b.outOfWindowCounter.Count()) assert.Equal(t, int64(1), b.outOfWindowCounter.Count())
//tODO: make sure lostcounter doesn't increase in orderly increment //tODO: make sure lostcounter doesn't increase in orderly increment
@@ -131,24 +127,23 @@ func TestBitsOutOfWindowCounter(t *testing.T) {
} }
func TestBitsLostCounter(t *testing.T) { func TestBitsLostCounter(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
b.lostCounter.Clear() b.lostCounter.Clear()
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
//assert.True(t, b.Update(0)) //assert.True(t, b.Update(0))
assert.True(t, b.Update(l, 0)) assert.True(t, b.Update(0))
assert.True(t, b.Update(l, 20)) assert.True(t, b.Update(20))
assert.True(t, b.Update(l, 21)) assert.True(t, b.Update(21))
assert.True(t, b.Update(l, 22)) assert.True(t, b.Update(22))
assert.True(t, b.Update(l, 23)) assert.True(t, b.Update(23))
assert.True(t, b.Update(l, 24)) assert.True(t, b.Update(24))
assert.True(t, b.Update(l, 25)) assert.True(t, b.Update(25))
assert.True(t, b.Update(l, 26)) assert.True(t, b.Update(26))
assert.True(t, b.Update(l, 27)) assert.True(t, b.Update(27))
assert.True(t, b.Update(l, 28)) assert.True(t, b.Update(28))
assert.True(t, b.Update(l, 29)) assert.True(t, b.Update(29))
assert.Equal(t, int64(20), b.lostCounter.Count()) assert.Equal(t, int64(20), b.lostCounter.Count())
assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count())
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
@@ -158,56 +153,56 @@ func TestBitsLostCounter(t *testing.T) {
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
assert.True(t, b.Update(l, 0)) assert.True(t, b.Update(0))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
assert.True(t, b.Update(l, 9)) assert.True(t, b.Update(9))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
// 10 will set 0 index, 0 was already set, no lost packets // 10 will set 0 index, 0 was already set, no lost packets
assert.True(t, b.Update(l, 10)) assert.True(t, b.Update(10))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
// 11 will set 1 index, 1 was missed, we should see 1 packet lost // 11 will set 1 index, 1 was missed, we should see 1 packet lost
assert.True(t, b.Update(l, 11)) assert.True(t, b.Update(11))
assert.Equal(t, int64(1), b.lostCounter.Count()) assert.Equal(t, int64(1), b.lostCounter.Count())
// Now let's fill in the window, should end up with 8 lost packets // Now let's fill in the window, should end up with 8 lost packets
assert.True(t, b.Update(l, 12)) assert.True(t, b.Update(12))
assert.True(t, b.Update(l, 13)) assert.True(t, b.Update(13))
assert.True(t, b.Update(l, 14)) assert.True(t, b.Update(14))
assert.True(t, b.Update(l, 15)) assert.True(t, b.Update(15))
assert.True(t, b.Update(l, 16)) assert.True(t, b.Update(16))
assert.True(t, b.Update(l, 17)) assert.True(t, b.Update(17))
assert.True(t, b.Update(l, 18)) assert.True(t, b.Update(18))
assert.True(t, b.Update(l, 19)) assert.True(t, b.Update(19))
assert.Equal(t, int64(8), b.lostCounter.Count()) assert.Equal(t, int64(8), b.lostCounter.Count())
// Jump ahead by a window size // Jump ahead by a window size
assert.True(t, b.Update(l, 29)) assert.True(t, b.Update(29))
assert.Equal(t, int64(8), b.lostCounter.Count()) assert.Equal(t, int64(8), b.lostCounter.Count())
// Now lets walk ahead normally through the window, the missed packets should fill in // Now lets walk ahead normally through the window, the missed packets should fill in
assert.True(t, b.Update(l, 30)) assert.True(t, b.Update(30))
assert.True(t, b.Update(l, 31)) assert.True(t, b.Update(31))
assert.True(t, b.Update(l, 32)) assert.True(t, b.Update(32))
assert.True(t, b.Update(l, 33)) assert.True(t, b.Update(33))
assert.True(t, b.Update(l, 34)) assert.True(t, b.Update(34))
assert.True(t, b.Update(l, 35)) assert.True(t, b.Update(35))
assert.True(t, b.Update(l, 36)) assert.True(t, b.Update(36))
assert.True(t, b.Update(l, 37)) assert.True(t, b.Update(37))
assert.True(t, b.Update(l, 38)) assert.True(t, b.Update(38))
// 39 packets tracked, 22 seen, 17 lost // 39 packets tracked, 22 seen, 17 lost
assert.Equal(t, int64(17), b.lostCounter.Count()) assert.Equal(t, int64(17), b.lostCounter.Count())
// Jump ahead by 2 windows, should have recording 1 full window missing // Jump ahead by 2 windows, should have recording 1 full window missing
assert.True(t, b.Update(l, 58)) assert.True(t, b.Update(58))
assert.Equal(t, int64(27), b.lostCounter.Count()) assert.Equal(t, int64(27), b.lostCounter.Count())
// Now lets walk ahead normally through the window, the missed packets should fill in from this window // Now lets walk ahead normally through the window, the missed packets should fill in from this window
assert.True(t, b.Update(l, 59)) assert.True(t, b.Update(59))
assert.True(t, b.Update(l, 60)) assert.True(t, b.Update(60))
assert.True(t, b.Update(l, 61)) assert.True(t, b.Update(61))
assert.True(t, b.Update(l, 62)) assert.True(t, b.Update(62))
assert.True(t, b.Update(l, 63)) assert.True(t, b.Update(63))
assert.True(t, b.Update(l, 64)) assert.True(t, b.Update(64))
assert.True(t, b.Update(l, 65)) assert.True(t, b.Update(65))
assert.True(t, b.Update(l, 66)) assert.True(t, b.Update(66))
assert.True(t, b.Update(l, 67)) assert.True(t, b.Update(67))
// 68 packets tracked, 32 seen, 36 missed // 68 packets tracked, 32 seen, 36 missed
assert.Equal(t, int64(36), b.lostCounter.Count()) assert.Equal(t, int64(36), b.lostCounter.Count())
assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count())
@@ -1,8 +0,0 @@
//go:build boringcrypto
// +build boringcrypto
package nebula
import "crypto/boring"
var boringEnabled = boring.Enabled
@@ -1,143 +0,0 @@
package nebula
import (
"fmt"
"math"
"net"
"strconv"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
)
// This allows us to "guess" what the remote might be for a host while we wait
// for the lighthouse response. See "lighthouse.calculated_remotes" in the
// example config file.
type calculatedRemote struct {
ipNet net.IPNet
maskIP iputil.VpnIp
mask iputil.VpnIp
port uint32
}
func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) {
// Ensure this is an IPv4 mask that we expect
ones, bits := ipNet.Mask.Size()
if ones == 0 || bits != 32 {
return nil, fmt.Errorf("invalid mask: %v", ipNet)
}
if port < 0 || port > math.MaxUint16 {
return nil, fmt.Errorf("invalid port: %d", port)
}
return &calculatedRemote{
ipNet: *ipNet,
maskIP: iputil.Ip2VpnIp(ipNet.IP),
mask: iputil.Ip2VpnIp(ipNet.Mask),
port: uint32(port),
}, nil
}
func (c *calculatedRemote) String() string {
return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
}
func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
// Combine the masked bytes of the "mask" IP with the unmasked bytes
// of the overlay IP
masked := (c.maskIP & c.mask) | (ip & ^c.mask)
return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
}
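A small, self-contained sketch of the masking arithmetic `Apply` performs, using the same values as the test file below (10.0.10.182 pushed through mask 192.168.1.0/24 yields 192.168.1.182). The helper here is illustrative, not part of the package:

```
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func ip2uint32(ip net.IP) uint32 { return binary.BigEndian.Uint32(ip.To4()) }

func main() {
	_, ipNet, _ := net.ParseCIDR("192.168.1.0/24")
	maskIP := ip2uint32(ipNet.IP)         // 192.168.1.0
	mask := ip2uint32(net.IP(ipNet.Mask)) // 255.255.255.0
	overlay := ip2uint32(net.ParseIP("10.0.10.182"))

	// Masked bytes come from the configured "mask" IP, unmasked bytes from the overlay IP.
	calculated := (maskIP & mask) | (overlay &^ mask)

	out := make(net.IP, 4)
	binary.BigEndian.PutUint32(out, calculated)
	fmt.Println(out) // 192.168.1.182
}
```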
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) {
value := c.Get(k)
if value == nil {
return nil, nil
}
calculatedRemotes := cidr.NewTree4()
rawMap, ok := value.(map[any]any)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
}
for rawKey, rawValue := range rawMap {
rawCIDR, ok := rawKey.(string)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}
_, ipNet, err := net.ParseCIDR(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}
entry, err := newCalculatedRemotesListFromConfig(rawValue)
if err != nil {
return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
}
calculatedRemotes.AddCIDR(ipNet, entry)
}
return calculatedRemotes, nil
}
func newCalculatedRemotesListFromConfig(raw any) ([]*calculatedRemote, error) {
rawList, ok := raw.([]any)
if !ok {
return nil, fmt.Errorf("calculated_remotes entry has invalid type: %T", raw)
}
var l []*calculatedRemote
for _, e := range rawList {
c, err := newCalculatedRemotesEntryFromConfig(e)
if err != nil {
return nil, fmt.Errorf("calculated_remotes entry: %w", err)
}
l = append(l, c)
}
return l, nil
}
func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
rawMap, ok := raw.(map[any]any)
if !ok {
return nil, fmt.Errorf("invalid type: %T", raw)
}
rawValue := rawMap["mask"]
if rawValue == nil {
return nil, fmt.Errorf("missing mask: %v", rawMap)
}
rawMask, ok := rawValue.(string)
if !ok {
return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
}
_, ipNet, err := net.ParseCIDR(rawMask)
if err != nil {
return nil, fmt.Errorf("invalid mask: %s", rawMask)
}
var port int
rawValue = rawMap["port"]
if rawValue == nil {
return nil, fmt.Errorf("missing port: %v", rawMap)
}
switch v := rawValue.(type) {
case int:
port = v
case string:
port, err = strconv.Atoi(v)
if err != nil {
return nil, fmt.Errorf("invalid port: %s: %w", v, err)
}
default:
return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
}
return newCalculatedRemote(ipNet, port)
}
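Putting the pieces together, the parser expects a map of overlay CIDRs to lists of `{mask, port}` entries; a sketch of the corresponding YAML under the `lighthouse.calculated_remotes` key referenced in the comment at the top of this file (addresses are placeholders):

```
lighthouse:
  calculated_remotes:
    # for peers in this overlay range, guess a remote before the lighthouse answers
    "10.0.10.0/24":
      - mask: "192.168.1.0/24"
        port: 4242
```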
@@ -1,27 +0,0 @@
package nebula
import (
"net"
"testing"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCalculatedRemoteApply(t *testing.T) {
_, ipNet, err := net.ParseCIDR("192.168.1.0/24")
require.NoError(t, err)
c, err := newCalculatedRemote(ipNet, 4242)
require.NoError(t, err)
input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182})
expected := &Ip4AndPort{
Ip: uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})),
Port: 4242,
}
assert.Equal(t, expected, c.Apply(input))
}
cert.go
@@ -7,11 +7,11 @@ import (
"strings" "strings"
"time" "time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
) )
var trustedCAs *cert.NebulaCAPool
type CertState struct { type CertState struct {
certificate *cert.NebulaCertificate certificate *cert.NebulaCertificate
rawCertificate []byte rawCertificate []byte
@@ -46,11 +46,16 @@ func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*Cert
return cs, nil return cs, nil
} }
func NewCertStateFromConfig(c *config.C) (*CertState, error) { func NewCertStateFromConfig(c *Config) (*CertState, error) {
var pemPrivateKey []byte var pemPrivateKey []byte
var err error var err error
privPathOrPEM := c.GetString("pki.key", "") privPathOrPEM := c.GetString("pki.key", "")
if privPathOrPEM == "" {
// Support backwards compat with the old x509
//TODO: remove after this is rolled out everywhere - NB 2018/02/23
privPathOrPEM = c.GetString("x509.key", "")
}
if privPathOrPEM == "" { if privPathOrPEM == "" {
return nil, errors.New("no pki.key path or PEM data provided") return nil, errors.New("no pki.key path or PEM data provided")
@@ -66,7 +71,7 @@ func NewCertStateFromConfig(c *config.C) (*CertState, error) {
} }
} }
rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey) rawKey, _, err := cert.UnmarshalX25519PrivateKey(pemPrivateKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err) return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
} }
@@ -74,6 +79,11 @@ func NewCertStateFromConfig(c *config.C) (*CertState, error) {
var rawCert []byte var rawCert []byte
pubPathOrPEM := c.GetString("pki.cert", "") pubPathOrPEM := c.GetString("pki.cert", "")
if pubPathOrPEM == "" {
// Support backwards compat with the old x509
//TODO: remove after this is rolled out everywhere - NB 2018/02/23
pubPathOrPEM = c.GetString("x509.cert", "")
}
if pubPathOrPEM == "" { if pubPathOrPEM == "" {
return nil, errors.New("no pki.cert path or PEM data provided") return nil, errors.New("no pki.cert path or PEM data provided")
@@ -102,25 +112,31 @@ func NewCertStateFromConfig(c *config.C) (*CertState, error) {
return nil, fmt.Errorf("no IPs encoded in certificate") return nil, fmt.Errorf("no IPs encoded in certificate")
} }
if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil { if err = nebulaCert.VerifyPrivateKey(rawKey); err != nil {
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert") return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
} }
return NewCertState(nebulaCert, rawKey) return NewCertState(nebulaCert, rawKey)
} }
func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) { func loadCAFromConfig(c *Config) (*cert.NebulaCAPool, error) {
var rawCA []byte var rawCA []byte
var err error var err error
caPathOrPEM := c.GetString("pki.ca", "") caPathOrPEM := c.GetString("pki.ca", "")
if caPathOrPEM == "" {
// Support backwards compat with the old x509
//TODO: remove after this is rolled out everywhere - NB 2018/02/23
caPathOrPEM = c.GetString("x509.ca", "")
}
if caPathOrPEM == "" { if caPathOrPEM == "" {
return nil, errors.New("no pki.ca path or PEM data provided") return nil, errors.New("no pki.ca path or PEM data provided")
} }
if strings.Contains(caPathOrPEM, "-----BEGIN") { if strings.Contains(caPathOrPEM, "-----BEGIN") {
rawCA = []byte(caPathOrPEM) rawCA = []byte(caPathOrPEM)
caPathOrPEM = "<inline>"
} else { } else {
rawCA, err = ioutil.ReadFile(caPathOrPEM) rawCA, err = ioutil.ReadFile(caPathOrPEM)
if err != nil { if err != nil {
@@ -129,32 +145,18 @@ func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error)
} }
CAs, err := cert.NewCAPoolFromBytes(rawCA) CAs, err := cert.NewCAPoolFromBytes(rawCA)
if errors.Is(err, cert.ErrExpired) { if err != nil {
var expired int
for _, cert := range CAs.CAs {
if cert.Expired(time.Now()) {
expired++
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
}
}
if expired >= len(CAs.CAs) {
return nil, errors.New("no valid CA certificates present")
}
} else if err != nil {
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err) return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
} }
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) { for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert") l.WithField("fingerprint", fp).Infof("Blocklisting cert")
CAs.BlocklistFingerprint(fp) CAs.BlocklistFingerprint(fp)
} }
// Support deprecated config for at least one minor release to allow for migrations // Support deprecated config for at leaast one minor release to allow for migrations
//TODO: remove in 2022 or later
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) { for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert") l.WithField("fingerprint", fp).Infof("Blocklisting cert")
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist") l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
CAs.BlocklistFingerprint(fp) CAs.BlocklistFingerprint(fp)
} }
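A sketch of the `pki` block these loaders read; paths are placeholders and the blocklist entry stands in for a real SHA-256 certificate fingerprint:

```
pki:
  ca: /etc/nebula/ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key
  blocklist:
    - "<sha256 fingerprint of a revoked host certificate>"
```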
@@ -2,8 +2,8 @@ GO111MODULE = on
export GO111MODULE export GO111MODULE
cert.pb.go: cert.proto .FORCE cert.pb.go: cert.proto .FORCE
go build google.golang.org/protobuf/cmd/protoc-gen-go go build github.com/golang/protobuf/protoc-gen-go
PATH="$(CURDIR):$(PATH)" protoc --go_out=. --go_opt=paths=source_relative $< PATH="$(PWD):$(PATH)" protoc --go_out=. $<
rm protoc-gen-go rm protoc-gen-go
.FORCE: .FORCE:
@@ -1,7 +1,6 @@
package cert package cert
import ( import (
"errors"
"fmt" "fmt"
"strings" "strings"
"time" "time"
@@ -22,32 +21,19 @@ func NewCAPool() *NebulaCAPool {
return &ca return &ca
} }
// NewCAPoolFromBytes will create a new CA pool from the provided
// input bytes, which must be a PEM-encoded set of nebula certificates.
// If the pool contains any expired certificates, an ErrExpired will be
// returned along with the pool. The caller must handle any such errors.
func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) { func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) {
pool := NewCAPool() pool := NewCAPool()
var err error var err error
var expired bool
for { for {
caPEMs, err = pool.AddCACertificate(caPEMs) caPEMs, err = pool.AddCACertificate(caPEMs)
if errors.Is(err, ErrExpired) {
expired = true
err = nil
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" { if caPEMs == nil || len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
break break
} }
} }
if expired {
return pool, ErrExpired
}
return pool, nil return pool, nil
} }
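A hedged sketch of how a caller might consume this, following the variant shown above that returns an `ErrExpired` sentinel alongside a still-usable pool (the file path is a placeholder):

```
package main

import (
	"errors"
	"io/ioutil"
	"log"

	"github.com/slackhq/nebula/cert"
)

func main() {
	rawCA, err := ioutil.ReadFile("ca.crt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	pool, err := cert.NewCAPoolFromBytes(rawCA)
	if errors.Is(err, cert.ErrExpired) {
		// The pool is still returned; deciding what to do with expired CAs is left to the caller.
		log.Println("warning: CA pool contains expired certificates")
	} else if err != nil {
		log.Fatal(err)
	}

	log.Printf("loaded %d CA certificates", len(pool.CAs))
}
```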
@@ -61,11 +47,15 @@ func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
} }
if !c.Details.IsCA { if !c.Details.IsCA {
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotCA) return pemBytes, fmt.Errorf("provided certificate was not a CA; %s", c.Details.Name)
} }
if !c.CheckSignature(c.Details.PublicKey) { if !c.CheckSignature(c.Details.PublicKey) {
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotSelfSigned) return pemBytes, fmt.Errorf("provided certificate was not self signed; %s", c.Details.Name)
}
if c.Expired(time.Now()) {
return pemBytes, fmt.Errorf("provided CA certificate is expired; %s", c.Details.Name)
} }
sum, err := c.Sha256Sum() sum, err := c.Sha256Sum()
@@ -74,10 +64,6 @@ func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
} }
ncp.CAs[sum] = c ncp.CAs[sum] = c
if c.Expired(time.Now()) {
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrExpired)
}
return pemBytes, nil return pemBytes, nil
} }
@@ -91,15 +77,9 @@ func (ncp *NebulaCAPool) ResetCertBlocklist() {
ncp.certBlocklist = make(map[string]struct{}) ncp.certBlocklist = make(map[string]struct{})
} }
// NOTE: This uses an internal cache for Sha256Sum() that will not be invalidated
// automatically if you manually change any fields in the NebulaCertificate.
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
return ncp.isBlocklistedWithCache(c, false)
}
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted // IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
func (ncp *NebulaCAPool) isBlocklistedWithCache(c *NebulaCertificate, useCache bool) bool { func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
h, err := c.sha256SumWithCache(useCache) h, err := c.Sha256Sum()
if err != nil { if err != nil {
return true return true
} }
@@ -2,26 +2,20 @@ package cert
import ( import (
"bytes" "bytes"
"crypto/ecdh" "crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"encoding/pem" "encoding/pem"
"errors"
"fmt" "fmt"
"math"
"math/big"
"net" "net"
"sync/atomic"
"time" "time"
"github.com/golang/protobuf/proto"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
"google.golang.org/protobuf/proto" "golang.org/x/crypto/ed25519"
) )
const publicKeyLen = 32 const publicKeyLen = 32
@@ -30,27 +24,13 @@ const (
CertBanner = "NEBULA CERTIFICATE" CertBanner = "NEBULA CERTIFICATE"
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY" X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY" X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY" Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY" Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
ECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 PRIVATE KEY"
) )
type NebulaCertificate struct { type NebulaCertificate struct {
Details NebulaCertificateDetails Details NebulaCertificateDetails
Signature []byte Signature []byte
// the cached hex string of the calculated sha256sum
// for VerifyWithCache
sha256sum atomic.Pointer[string]
// the cached public key bytes if they were verified as the signer
// for VerifyWithCache
signatureVerified atomic.Pointer[[]byte]
} }
type NebulaCertificateDetails struct { type NebulaCertificateDetails struct {
@@ -66,25 +46,10 @@ type NebulaCertificateDetails struct {
// Map of groups for faster lookup // Map of groups for faster lookup
InvertedGroups map[string]struct{} InvertedGroups map[string]struct{}
Curve Curve
}
type NebulaEncryptedData struct {
EncryptionMetadata NebulaEncryptionMetadata
Ciphertext []byte
}
type NebulaEncryptionMetadata struct {
EncryptionAlgorithm string
Argon2Parameters Argon2Parameters
} }
type m map[string]interface{} type m map[string]interface{}
// Returned if we try to unmarshal an encrypted private key without a passphrase
var ErrPrivateKeyEncrypted = errors.New("private key must be decrypted")
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert // UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert
func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) { func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
if len(b) == 0 { if len(b) == 0 {
@@ -96,10 +61,6 @@ func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
return nil, err return nil, err
} }
if rc.Details == nil {
return nil, fmt.Errorf("encoded Details was nil")
}
if len(rc.Details.Ips)%2 != 0 { if len(rc.Details.Ips)%2 != 0 {
return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found") return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
} }
@@ -119,7 +80,6 @@ func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
PublicKey: make([]byte, len(rc.Details.PublicKey)), PublicKey: make([]byte, len(rc.Details.PublicKey)),
IsCA: rc.Details.IsCA, IsCA: rc.Details.IsCA,
InvertedGroups: make(map[string]struct{}), InvertedGroups: make(map[string]struct{}),
Curve: rc.Details.Curve,
}, },
Signature: make([]byte, len(rc.Signature)), Signature: make([]byte, len(rc.Signature)),
} }
@@ -163,35 +123,10 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
if p == nil { if p == nil {
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block") return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
} }
if p.Type != CertBanner {
return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
}
nc, err := UnmarshalNebulaCertificate(p.Bytes) nc, err := UnmarshalNebulaCertificate(p.Bytes)
return nc, r, err return nc, r, err
} }
func MarshalPrivateKey(curve Curve, b []byte) []byte {
switch curve {
case Curve_CURVE25519:
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
case Curve_P256:
return pem.EncodeToMemory(&pem.Block{Type: P256PrivateKeyBanner, Bytes: b})
default:
return nil
}
}
func MarshalSigningPrivateKey(curve Curve, b []byte) []byte {
switch curve {
case Curve_CURVE25519:
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: b})
case Curve_P256:
return pem.EncodeToMemory(&pem.Block{Type: ECDSAP256PrivateKeyBanner, Bytes: b})
default:
return nil
}
}
// MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key // MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key
func MarshalX25519PrivateKey(b []byte) []byte { func MarshalX25519PrivateKey(b []byte) []byte {
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b}) return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
@@ -202,87 +137,6 @@ func MarshalEd25519PrivateKey(key ed25519.PrivateKey) []byte {
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key}) return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key})
} }
func UnmarshalPrivateKey(b []byte) ([]byte, []byte, Curve, error) {
k, r := pem.Decode(b)
if k == nil {
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
}
var expectedLen int
var curve Curve
switch k.Type {
case X25519PrivateKeyBanner:
expectedLen = 32
curve = Curve_CURVE25519
case P256PrivateKeyBanner:
expectedLen = 32
curve = Curve_P256
default:
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula private key banner")
}
if len(k.Bytes) != expectedLen {
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s private key", expectedLen, curve)
}
return k.Bytes, r, curve, nil
}
func UnmarshalSigningPrivateKey(b []byte) ([]byte, []byte, Curve, error) {
k, r := pem.Decode(b)
if k == nil {
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
}
var curve Curve
switch k.Type {
case EncryptedEd25519PrivateKeyBanner:
return nil, nil, Curve_CURVE25519, ErrPrivateKeyEncrypted
case EncryptedECDSAP256PrivateKeyBanner:
return nil, nil, Curve_P256, ErrPrivateKeyEncrypted
case Ed25519PrivateKeyBanner:
curve = Curve_CURVE25519
if len(k.Bytes) != ed25519.PrivateKeySize {
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid Ed25519 private key", ed25519.PrivateKeySize)
}
case ECDSAP256PrivateKeyBanner:
curve = Curve_P256
if len(k.Bytes) != 32 {
return nil, r, 0, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
}
default:
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula Ed25519/ECDSA private key banner")
}
return k.Bytes, r, curve, nil
}
// EncryptAndMarshalSigningPrivateKey is a simple helper to encrypt and PEM encode a private key
func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte, kdfParams *Argon2Parameters) ([]byte, error) {
ciphertext, err := aes256Encrypt(passphrase, kdfParams, b)
if err != nil {
return nil, err
}
b, err = proto.Marshal(&RawNebulaEncryptedData{
EncryptionMetadata: &RawNebulaEncryptionMetadata{
EncryptionAlgorithm: "AES-256-GCM",
Argon2Parameters: &RawNebulaArgon2Parameters{
Version: kdfParams.version,
Memory: kdfParams.Memory,
Parallelism: uint32(kdfParams.Parallelism),
Iterations: kdfParams.Iterations,
Salt: kdfParams.salt,
},
},
Ciphertext: ciphertext,
})
switch curve {
case Curve_CURVE25519:
return pem.EncodeToMemory(&pem.Block{Type: EncryptedEd25519PrivateKeyBanner, Bytes: b}), nil
case Curve_P256:
return pem.EncodeToMemory(&pem.Block{Type: EncryptedECDSAP256PrivateKeyBanner, Bytes: b}), nil
default:
return nil, fmt.Errorf("invalid curve: %v", curve)
}
}
// UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b // UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b
// or an error on failure // or an error on failure
func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) { func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) {
@@ -307,13 +161,9 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
if k == nil { if k == nil {
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block") return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
} }
if k.Type != Ed25519PrivateKeyBanner {
if k.Type == EncryptedEd25519PrivateKeyBanner {
return nil, r, ErrPrivateKeyEncrypted
} else if k.Type != Ed25519PrivateKeyBanner {
return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner") return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner")
} }
if len(k.Bytes) != ed25519.PrivateKeySize { if len(k.Bytes) != ed25519.PrivateKeySize {
return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key") return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
} }
@@ -321,126 +171,6 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
return k.Bytes, r, nil return k.Bytes, r, nil
} }
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert into its
// protobuf-generated struct.
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
if len(b) == 0 {
return nil, fmt.Errorf("nil byte array")
}
var rned RawNebulaEncryptedData
err := proto.Unmarshal(b, &rned)
if err != nil {
return nil, err
}
if rned.EncryptionMetadata == nil {
return nil, fmt.Errorf("encoded EncryptionMetadata was nil")
}
if rned.EncryptionMetadata.Argon2Parameters == nil {
return nil, fmt.Errorf("encoded Argon2Parameters was nil")
}
params, err := unmarshalArgon2Parameters(rned.EncryptionMetadata.Argon2Parameters)
if err != nil {
return nil, err
}
ned := NebulaEncryptedData{
EncryptionMetadata: NebulaEncryptionMetadata{
EncryptionAlgorithm: rned.EncryptionMetadata.EncryptionAlgorithm,
Argon2Parameters: *params,
},
Ciphertext: rned.Ciphertext,
}
return &ned, nil
}
func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) {
if params.Version < math.MinInt32 || params.Version > math.MaxInt32 {
return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32)
}
if params.Memory <= 0 || params.Memory > math.MaxUint32 {
return nil, fmt.Errorf("Argon2Parameters Memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
}
if params.Parallelism <= 0 || params.Parallelism > math.MaxUint8 {
return nil, fmt.Errorf("Argon2Parameters Parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
}
if params.Iterations <= 0 || params.Iterations > math.MaxUint32 {
return nil, fmt.Errorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
}
return &Argon2Parameters{
version: rune(params.Version),
Memory: uint32(params.Memory),
Parallelism: uint8(params.Parallelism),
Iterations: uint32(params.Iterations),
salt: params.Salt,
}, nil
}
// DecryptAndUnmarshalSigningPrivateKey will try to pem decode and decrypt an Ed25519/ECDSA private key with
// the given passphrase, returning any other bytes b or an error on failure
func DecryptAndUnmarshalSigningPrivateKey(passphrase, b []byte) (Curve, []byte, []byte, error) {
var curve Curve
k, r := pem.Decode(b)
if k == nil {
return curve, nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
}
switch k.Type {
case EncryptedEd25519PrivateKeyBanner:
curve = Curve_CURVE25519
case EncryptedECDSAP256PrivateKeyBanner:
curve = Curve_P256
default:
return curve, nil, r, fmt.Errorf("bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
}
ned, err := UnmarshalNebulaEncryptedData(k.Bytes)
if err != nil {
return curve, nil, r, err
}
var bytes []byte
switch ned.EncryptionMetadata.EncryptionAlgorithm {
case "AES-256-GCM":
bytes, err = aes256Decrypt(passphrase, &ned.EncryptionMetadata.Argon2Parameters, ned.Ciphertext)
if err != nil {
return curve, nil, r, err
}
default:
return curve, nil, r, fmt.Errorf("unsupported encryption algorithm: %s", ned.EncryptionMetadata.EncryptionAlgorithm)
}
switch curve {
case Curve_CURVE25519:
if len(bytes) != ed25519.PrivateKeySize {
return curve, nil, r, fmt.Errorf("key was not %d bytes, is invalid ed25519 private key", ed25519.PrivateKeySize)
}
case Curve_P256:
if len(bytes) != 32 {
return curve, nil, r, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
}
}
return curve, bytes, r, nil
}
func MarshalPublicKey(curve Curve, b []byte) []byte {
switch curve {
case Curve_CURVE25519:
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
case Curve_P256:
return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
default:
return nil
}
}
// MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key // MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key
func MarshalX25519PublicKey(b []byte) []byte { func MarshalX25519PublicKey(b []byte) []byte {
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b}) return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
@@ -451,30 +181,6 @@ func MarshalEd25519PublicKey(key ed25519.PublicKey) []byte {
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key}) return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key})
} }
func UnmarshalPublicKey(b []byte) ([]byte, []byte, Curve, error) {
k, r := pem.Decode(b)
if k == nil {
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
}
var expectedLen int
var curve Curve
switch k.Type {
case X25519PublicKeyBanner:
expectedLen = 32
curve = Curve_CURVE25519
case P256PublicKeyBanner:
// Uncompressed
expectedLen = 65
curve = Curve_P256
default:
return nil, r, 0, fmt.Errorf("bytes did not contain a proper nebula public key banner")
}
if len(k.Bytes) != expectedLen {
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s public key", expectedLen, curve)
}
return k.Bytes, r, curve, nil
}
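A round-trip sketch (editorial) pairing MarshalPublicKey with the UnmarshalPublicKey shown above; pub is a placeholder for a 32-byte X25519 or 65-byte uncompressed P-256 public key.

func examplePublicKeyRoundTrip(curve Curve, pub []byte) ([]byte, error) {
	pemBytes := MarshalPublicKey(curve, pub)
	decoded, _, gotCurve, err := UnmarshalPublicKey(pemBytes)
	if err != nil {
		return nil, err
	}
	if gotCurve != curve {
		return nil, fmt.Errorf("curve mismatch: got %s, want %s", gotCurve, curve)
	}
	return decoded, nil
}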
// UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b // UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b
// or an error on failure // or an error on failure
func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) { func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) {
@@ -510,86 +216,27 @@ func UnmarshalEd25519PublicKey(b []byte) (ed25519.PublicKey, []byte, error) {
} }
// Sign signs a nebula cert with the provided private key // Sign signs a nebula cert with the provided private key
func (nc *NebulaCertificate) Sign(curve Curve, key []byte) error { func (nc *NebulaCertificate) Sign(key ed25519.PrivateKey) error {
if curve != nc.Details.Curve {
return fmt.Errorf("curve in cert and private key supplied don't match")
}
b, err := proto.Marshal(nc.getRawDetails()) b, err := proto.Marshal(nc.getRawDetails())
if err != nil { if err != nil {
return err return err
} }
var sig []byte sig, err := key.Sign(rand.Reader, b, crypto.Hash(0))
switch curve {
case Curve_CURVE25519:
signer := ed25519.PrivateKey(key)
sig = ed25519.Sign(signer, b)
case Curve_P256:
signer := &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: elliptic.P256(),
},
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
D: new(big.Int).SetBytes(key),
}
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
signer.X, signer.Y = signer.Curve.ScalarBaseMult(key)
// We need to hash first for ECDSA
// - https://pkg.go.dev/crypto/ecdsa#SignASN1
hashed := sha256.Sum256(b)
sig, err = ecdsa.SignASN1(rand.Reader, signer, hashed[:])
if err != nil { if err != nil {
return err return err
} }
default:
return fmt.Errorf("invalid curve: %s", nc.Details.Curve)
}
nc.Signature = sig nc.Signature = sig
return nil return nil
} }
// CheckSignature verifies the signature against the provided public key // CheckSignature verifies the signature against the provided public key
func (nc *NebulaCertificate) CheckSignature(key []byte) bool { func (nc *NebulaCertificate) CheckSignature(key ed25519.PublicKey) bool {
b, err := proto.Marshal(nc.getRawDetails()) b, err := proto.Marshal(nc.getRawDetails())
if err != nil { if err != nil {
return false return false
} }
switch nc.Details.Curve { return ed25519.Verify(key, b, nc.Signature)
case Curve_CURVE25519:
return ed25519.Verify(ed25519.PublicKey(key), b, nc.Signature)
case Curve_P256:
x, y := elliptic.Unmarshal(elliptic.P256(), key)
pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
hashed := sha256.Sum256(b)
return ecdsa.VerifyASN1(pubKey, hashed[:], nc.Signature)
default:
return false
}
}
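A sign-then-verify sketch (editorial) over the curve-aware Sign and CheckSignature above; priv and pub are a placeholder key pair for the matching curve.

func exampleSignAndVerify(nc *NebulaCertificate, curve Curve, priv, pub []byte) error {
	if err := nc.Sign(curve, priv); err != nil {
		return err
	}
	if !nc.CheckSignature(pub) {
		return fmt.Errorf("signature did not verify")
	}
	return nil
}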
// NOTE: This uses an internal cache that will not be invalidated automatically
// if you manually change any fields in the NebulaCertificate.
func (nc *NebulaCertificate) checkSignatureWithCache(key []byte, useCache bool) bool {
if !useCache {
return nc.CheckSignature(key)
}
if v := nc.signatureVerified.Load(); v != nil {
return bytes.Equal(*v, key)
}
verified := nc.CheckSignature(key)
if verified {
keyCopy := make([]byte, len(key))
copy(keyCopy, key)
nc.signatureVerified.Store(&keyCopy)
}
return verified
} }
// Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false // Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false
@@ -599,27 +246,8 @@ func (nc *NebulaCertificate) Expired(t time.Time) bool {
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc) // Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) { func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
return nc.verify(t, ncp, false) if ncp.IsBlocklisted(nc) {
} return false, fmt.Errorf("certificate has been blocked")
// VerifyWithCache will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
//
// NOTE: This uses an internal cache that will not be invalidated automatically
// if you manually change any fields in the NebulaCertificate.
func (nc *NebulaCertificate) VerifyWithCache(t time.Time, ncp *NebulaCAPool) (bool, error) {
return nc.verify(t, ncp, true)
}
// ResetCache resets the cache used by VerifyWithCache.
func (nc *NebulaCertificate) ResetCache() {
nc.sha256sum.Store(nil)
nc.signatureVerified.Store(nil)
}
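A verification sketch (editorial) showing the cached entry point and the sentinel errors referenced just below; caPool and now are placeholders.

func exampleVerify(nc *NebulaCertificate, caPool *NebulaCAPool, now time.Time) error {
	ok, err := nc.VerifyWithCache(now, caPool)
	if !ok {
		// err is one of the package sentinels, e.g. ErrBlockListed,
		// ErrRootExpired, ErrExpired or ErrSignatureMismatch.
		return err
	}
	return nil
}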
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
func (nc *NebulaCertificate) verify(t time.Time, ncp *NebulaCAPool, useCache bool) (bool, error) {
if ncp.isBlocklistedWithCache(nc, useCache) {
return false, ErrBlockListed
} }
signer, err := ncp.GetCAForCert(nc) signer, err := ncp.GetCAForCert(nc)
@@ -628,15 +256,15 @@ func (nc *NebulaCertificate) verify(t time.Time, ncp *NebulaCAPool, useCache boo
} }
if signer.Expired(t) { if signer.Expired(t) {
return false, ErrRootExpired return false, fmt.Errorf("root certificate is expired")
} }
if nc.Expired(t) { if nc.Expired(t) {
return false, ErrExpired return false, fmt.Errorf("certificate is expired")
} }
if !nc.checkSignatureWithCache(signer.Details.PublicKey, useCache) { if !nc.CheckSignature(signer.Details.PublicKey) {
return false, ErrSignatureMismatch return false, fmt.Errorf("certificate signature did not match")
} }
if err := nc.CheckRootConstrains(signer); err != nil { if err := nc.CheckRootConstrains(signer); err != nil {
@@ -689,57 +317,13 @@ func (nc *NebulaCertificate) CheckRootConstrains(signer *NebulaCertificate) erro
} }
// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match // VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
func (nc *NebulaCertificate) VerifyPrivateKey(curve Curve, key []byte) error { func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
if curve != nc.Details.Curve { var dst, key32 [32]byte
return fmt.Errorf("curve in cert and private key supplied don't match") copy(key32[:], key)
} curve25519.ScalarBaseMult(&dst, &key32)
if nc.Details.IsCA { if !bytes.Equal(dst[:], nc.Details.PublicKey) {
switch curve {
case Curve_CURVE25519:
// the call to PublicKey below will panic slice bounds out of range otherwise
if len(key) != ed25519.PrivateKeySize {
return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
}
if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
return fmt.Errorf("public key in cert and private key supplied don't match") return fmt.Errorf("public key in cert and private key supplied don't match")
} }
case Curve_P256:
privkey, err := ecdh.P256().NewPrivateKey(key)
if err != nil {
return fmt.Errorf("cannot parse private key as P256")
}
pub := privkey.PublicKey().Bytes()
if !bytes.Equal(pub, nc.Details.PublicKey) {
return fmt.Errorf("public key in cert and private key supplied don't match")
}
default:
return fmt.Errorf("invalid curve: %s", curve)
}
return nil
}
var pub []byte
switch curve {
case Curve_CURVE25519:
var err error
pub, err = curve25519.X25519(key, curve25519.Basepoint)
if err != nil {
return err
}
case Curve_P256:
privkey, err := ecdh.P256().NewPrivateKey(key)
if err != nil {
return err
}
pub = privkey.PublicKey().Bytes()
default:
return fmt.Errorf("invalid curve: %s", curve)
}
if !bytes.Equal(pub, nc.Details.PublicKey) {
return fmt.Errorf("public key in cert and private key supplied don't match")
}
return nil return nil
} }
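A pairing-check sketch (editorial) for the curve-aware VerifyPrivateKey above; ca, node and the key slices are placeholders. CA certs carry signing keys (ed25519/ECDSA), node certs carry DH keys (X25519/P-256 ECDH), and the IsCA branch above picks the right comparison.

func exampleCheckKeyPairs(ca, node *NebulaCertificate, caKey, nodeKey []byte) error {
	if err := ca.VerifyPrivateKey(Curve_CURVE25519, caKey); err != nil {
		return err
	}
	return node.VerifyPrivateKey(Curve_CURVE25519, nodeKey)
}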
@@ -788,7 +372,6 @@ func (nc *NebulaCertificate) String() string {
s += fmt.Sprintf("\t\tIs CA: %v\n", nc.Details.IsCA) s += fmt.Sprintf("\t\tIs CA: %v\n", nc.Details.IsCA)
s += fmt.Sprintf("\t\tIssuer: %s\n", nc.Details.Issuer) s += fmt.Sprintf("\t\tIssuer: %s\n", nc.Details.Issuer)
s += fmt.Sprintf("\t\tPublic key: %x\n", nc.Details.PublicKey) s += fmt.Sprintf("\t\tPublic key: %x\n", nc.Details.PublicKey)
s += fmt.Sprintf("\t\tCurve: %s\n", nc.Details.Curve)
s += "\t}\n" s += "\t}\n"
fp, err := nc.Sha256Sum() fp, err := nc.Sha256Sum()
if err == nil { if err == nil {
@@ -809,7 +392,6 @@ func (nc *NebulaCertificate) getRawDetails() *RawNebulaCertificateDetails {
NotAfter: nc.Details.NotAfter.Unix(), NotAfter: nc.Details.NotAfter.Unix(),
PublicKey: make([]byte, len(nc.Details.PublicKey)), PublicKey: make([]byte, len(nc.Details.PublicKey)),
IsCA: nc.Details.IsCA, IsCA: nc.Details.IsCA,
Curve: nc.Details.Curve,
} }
for _, ipNet := range nc.Details.Ips { for _, ipNet := range nc.Details.Ips {
@@ -858,25 +440,6 @@ func (nc *NebulaCertificate) Sha256Sum() (string, error) {
return hex.EncodeToString(sum[:]), nil return hex.EncodeToString(sum[:]), nil
} }
// NOTE: This uses an internal cache that will not be invalidated automatically
// if you manually change any fields in the NebulaCertificate.
func (nc *NebulaCertificate) sha256SumWithCache(useCache bool) (string, error) {
if !useCache {
return nc.Sha256Sum()
}
if s := nc.sha256sum.Load(); s != nil {
return *s, nil
}
s, err := nc.Sha256Sum()
if err != nil {
return s, err
}
nc.sha256sum.Store(&s)
return s, nil
}
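A fingerprint sketch (editorial) tying Sha256Sum to the CA pool blocklist exercised by the tests later in this diff; caPool and nc are placeholders.

func exampleBlockCert(caPool *NebulaCAPool, nc *NebulaCertificate) error {
	fp, err := nc.Sha256Sum()
	if err != nil {
		return err
	}
	caPool.BlocklistFingerprint(fp)
	return nil
}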
func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) { func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
toString := func(ips []*net.IPNet) []string { toString := func(ips []*net.IPNet) []string {
s := []string{} s := []string{}
@@ -898,7 +461,6 @@ func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
"publicKey": fmt.Sprintf("%x", nc.Details.PublicKey), "publicKey": fmt.Sprintf("%x", nc.Details.PublicKey),
"isCa": nc.Details.IsCA, "isCa": nc.Details.IsCA,
"issuer": nc.Details.Issuer, "issuer": nc.Details.Issuer,
"curve": nc.Details.Curve.String(),
}, },
"fingerprint": fp, "fingerprint": fp,
"signature": fmt.Sprintf("%x", nc.Signature), "signature": fmt.Sprintf("%x", nc.Signature),


@@ -1,620 +1,202 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v3.21.5
// source: cert.proto // source: cert.proto
package cert package cert
import ( import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect" fmt "fmt"
protoimpl "google.golang.org/protobuf/runtime/protoimpl" proto "github.com/golang/protobuf/proto"
reflect "reflect" math "math"
sync "sync"
) )
const ( // Reference imports to suppress errors if they are not otherwise used.
// Verify that this generated code is sufficiently up-to-date. var _ = proto.Marshal
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) var _ = fmt.Errorf
// Verify that runtime/protoimpl is sufficiently up-to-date. var _ = math.Inf
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Curve int32 // This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const ( // A compilation error at this line likely means your copy of the
Curve_CURVE25519 Curve = 0 // proto package needs to be updated.
Curve_P256 Curve = 1 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
)
// Enum value maps for Curve.
var (
Curve_name = map[int32]string{
0: "CURVE25519",
1: "P256",
}
Curve_value = map[string]int32{
"CURVE25519": 0,
"P256": 1,
}
)
func (x Curve) Enum() *Curve {
p := new(Curve)
*p = x
return p
}
func (x Curve) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Curve) Descriptor() protoreflect.EnumDescriptor {
return file_cert_proto_enumTypes[0].Descriptor()
}
func (Curve) Type() protoreflect.EnumType {
return &file_cert_proto_enumTypes[0]
}
func (x Curve) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Curve.Descriptor instead.
func (Curve) EnumDescriptor() ([]byte, []int) {
return file_cert_proto_rawDescGZIP(), []int{0}
}
type RawNebulaCertificate struct { type RawNebulaCertificate struct {
state protoimpl.MessageState Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,json=details,proto3" json:"Details,omitempty"`
sizeCache protoimpl.SizeCache Signature []byte `protobuf:"bytes,2,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty"`
unknownFields protoimpl.UnknownFields XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,proto3" json:"Details,omitempty"` XXX_sizecache int32 `json:"-"`
Signature []byte `protobuf:"bytes,2,opt,name=Signature,proto3" json:"Signature,omitempty"`
}
func (x *RawNebulaCertificate) Reset() {
*x = RawNebulaCertificate{}
if protoimpl.UnsafeEnabled {
mi := &file_cert_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RawNebulaCertificate) String() string {
return protoimpl.X.MessageStringOf(x)
} }
func (m *RawNebulaCertificate) Reset() { *m = RawNebulaCertificate{} }
func (m *RawNebulaCertificate) String() string { return proto.CompactTextString(m) }
func (*RawNebulaCertificate) ProtoMessage() {} func (*RawNebulaCertificate) ProtoMessage() {}
func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {
mi := &file_cert_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaCertificate.ProtoReflect.Descriptor instead.
func (*RawNebulaCertificate) Descriptor() ([]byte, []int) { func (*RawNebulaCertificate) Descriptor() ([]byte, []int) {
return file_cert_proto_rawDescGZIP(), []int{0} return fileDescriptor_a142e29cbef9b1cf, []int{0}
} }
func (x *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails { func (m *RawNebulaCertificate) XXX_Unmarshal(b []byte) error {
if x != nil { return xxx_messageInfo_RawNebulaCertificate.Unmarshal(m, b)
return x.Details }
func (m *RawNebulaCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RawNebulaCertificate.Marshal(b, m, deterministic)
}
func (m *RawNebulaCertificate) XXX_Merge(src proto.Message) {
xxx_messageInfo_RawNebulaCertificate.Merge(m, src)
}
func (m *RawNebulaCertificate) XXX_Size() int {
return xxx_messageInfo_RawNebulaCertificate.Size(m)
}
func (m *RawNebulaCertificate) XXX_DiscardUnknown() {
xxx_messageInfo_RawNebulaCertificate.DiscardUnknown(m)
}
var xxx_messageInfo_RawNebulaCertificate proto.InternalMessageInfo
func (m *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
if m != nil {
return m.Details
} }
return nil return nil
} }
func (x *RawNebulaCertificate) GetSignature() []byte { func (m *RawNebulaCertificate) GetSignature() []byte {
if x != nil { if m != nil {
return x.Signature return m.Signature
} }
return nil return nil
} }
type RawNebulaCertificateDetails struct { type RawNebulaCertificateDetails struct {
state protoimpl.MessageState Name string `protobuf:"bytes,1,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
// Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask // Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask
Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,proto3" json:"Ips,omitempty"` Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,json=ips,proto3" json:"Ips,omitempty"`
Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,proto3" json:"Subnets,omitempty"` Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,json=subnets,proto3" json:"Subnets,omitempty"`
Groups []string `protobuf:"bytes,4,rep,name=Groups,proto3" json:"Groups,omitempty"` Groups []string `protobuf:"bytes,4,rep,name=Groups,json=groups,proto3" json:"Groups,omitempty"`
NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"` NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,json=notBefore,proto3" json:"NotBefore,omitempty"`
NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"` NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,json=notAfter,proto3" json:"NotAfter,omitempty"`
PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"` PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,json=publicKey,proto3" json:"PublicKey,omitempty"`
IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"` IsCA bool `protobuf:"varint,8,opt,name=IsCA,json=isCA,proto3" json:"IsCA,omitempty"`
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed // sha-256 of the issuer certificate, if this field is blank the cert is self-signed
Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"` Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,json=issuer,proto3" json:"Issuer,omitempty"`
Curve Curve `protobuf:"varint,100,opt,name=curve,proto3,enum=cert.Curve" json:"curve,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
} XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
func (x *RawNebulaCertificateDetails) Reset() {
*x = RawNebulaCertificateDetails{}
if protoimpl.UnsafeEnabled {
mi := &file_cert_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RawNebulaCertificateDetails) String() string {
return protoimpl.X.MessageStringOf(x)
} }
func (m *RawNebulaCertificateDetails) Reset() { *m = RawNebulaCertificateDetails{} }
func (m *RawNebulaCertificateDetails) String() string { return proto.CompactTextString(m) }
func (*RawNebulaCertificateDetails) ProtoMessage() {} func (*RawNebulaCertificateDetails) ProtoMessage() {}
func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {
mi := &file_cert_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaCertificateDetails.ProtoReflect.Descriptor instead.
func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) { func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) {
return file_cert_proto_rawDescGZIP(), []int{1} return fileDescriptor_a142e29cbef9b1cf, []int{1}
} }
func (x *RawNebulaCertificateDetails) GetName() string { func (m *RawNebulaCertificateDetails) XXX_Unmarshal(b []byte) error {
if x != nil { return xxx_messageInfo_RawNebulaCertificateDetails.Unmarshal(m, b)
return x.Name }
func (m *RawNebulaCertificateDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RawNebulaCertificateDetails.Marshal(b, m, deterministic)
}
func (m *RawNebulaCertificateDetails) XXX_Merge(src proto.Message) {
xxx_messageInfo_RawNebulaCertificateDetails.Merge(m, src)
}
func (m *RawNebulaCertificateDetails) XXX_Size() int {
return xxx_messageInfo_RawNebulaCertificateDetails.Size(m)
}
func (m *RawNebulaCertificateDetails) XXX_DiscardUnknown() {
xxx_messageInfo_RawNebulaCertificateDetails.DiscardUnknown(m)
}
var xxx_messageInfo_RawNebulaCertificateDetails proto.InternalMessageInfo
func (m *RawNebulaCertificateDetails) GetName() string {
if m != nil {
return m.Name
} }
return "" return ""
} }
func (x *RawNebulaCertificateDetails) GetIps() []uint32 { func (m *RawNebulaCertificateDetails) GetIps() []uint32 {
if x != nil { if m != nil {
return x.Ips return m.Ips
} }
return nil return nil
} }
func (x *RawNebulaCertificateDetails) GetSubnets() []uint32 { func (m *RawNebulaCertificateDetails) GetSubnets() []uint32 {
if x != nil { if m != nil {
return x.Subnets return m.Subnets
} }
return nil return nil
} }
func (x *RawNebulaCertificateDetails) GetGroups() []string { func (m *RawNebulaCertificateDetails) GetGroups() []string {
if x != nil { if m != nil {
return x.Groups return m.Groups
} }
return nil return nil
} }
func (x *RawNebulaCertificateDetails) GetNotBefore() int64 { func (m *RawNebulaCertificateDetails) GetNotBefore() int64 {
if x != nil { if m != nil {
return x.NotBefore return m.NotBefore
} }
return 0 return 0
} }
func (x *RawNebulaCertificateDetails) GetNotAfter() int64 { func (m *RawNebulaCertificateDetails) GetNotAfter() int64 {
if x != nil { if m != nil {
return x.NotAfter return m.NotAfter
} }
return 0 return 0
} }
func (x *RawNebulaCertificateDetails) GetPublicKey() []byte { func (m *RawNebulaCertificateDetails) GetPublicKey() []byte {
if x != nil { if m != nil {
return x.PublicKey return m.PublicKey
} }
return nil return nil
} }
func (x *RawNebulaCertificateDetails) GetIsCA() bool { func (m *RawNebulaCertificateDetails) GetIsCA() bool {
if x != nil { if m != nil {
return x.IsCA return m.IsCA
} }
return false return false
} }
func (x *RawNebulaCertificateDetails) GetIssuer() []byte { func (m *RawNebulaCertificateDetails) GetIssuer() []byte {
if x != nil { if m != nil {
return x.Issuer return m.Issuer
} }
return nil return nil
} }
func (x *RawNebulaCertificateDetails) GetCurve() Curve { func init() {
if x != nil { proto.RegisterType((*RawNebulaCertificate)(nil), "cert.RawNebulaCertificate")
return x.Curve proto.RegisterType((*RawNebulaCertificateDetails)(nil), "cert.RawNebulaCertificateDetails")
}
return Curve_CURVE25519
} }
type RawNebulaEncryptedData struct { func init() { proto.RegisterFile("cert.proto", fileDescriptor_a142e29cbef9b1cf) }
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
EncryptionMetadata *RawNebulaEncryptionMetadata `protobuf:"bytes,1,opt,name=EncryptionMetadata,proto3" json:"EncryptionMetadata,omitempty"` var fileDescriptor_a142e29cbef9b1cf = []byte{
Ciphertext []byte `protobuf:"bytes,2,opt,name=Ciphertext,proto3" json:"Ciphertext,omitempty"` // 279 bytes of a gzipped FileDescriptorProto
} 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xcf, 0x4a, 0xf4, 0x30,
0x14, 0xc5, 0xc9, 0xa4, 0x5f, 0xdb, 0xe4, 0x53, 0x90, 0x20, 0x12, 0xd4, 0x45, 0x9c, 0x55, 0x56,
func (x *RawNebulaEncryptedData) Reset() { 0xb3, 0xd0, 0xa5, 0xab, 0x71, 0x04, 0x29, 0x42, 0x91, 0xcc, 0x13, 0xa4, 0xf5, 0x76, 0x08, 0x74,
*x = RawNebulaEncryptedData{} 0x9a, 0x9a, 0x3f, 0x88, 0x8f, 0xee, 0x4e, 0x9a, 0x4e, 0x77, 0xe2, 0xee, 0x9e, 0x5f, 0xce, 0x49,
if protoimpl.UnsafeEnabled { 0x4e, 0x2e, 0xa5, 0x2d, 0xb8, 0xb0, 0x19, 0x9d, 0x0d, 0x96, 0x65, 0xd3, 0xbc, 0xfe, 0xa0, 0x97,
mi := &file_cert_proto_msgTypes[2] 0x4a, 0x7f, 0xd6, 0xd0, 0xc4, 0x5e, 0xef, 0xc0, 0x05, 0xd3, 0x99, 0x56, 0x07, 0x60, 0x8f, 0xb4,
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 0x78, 0x86, 0xa0, 0x4d, 0xef, 0x39, 0x12, 0x48, 0xfe, 0xbf, 0xbf, 0xdb, 0xa4, 0xec, 0x6f, 0xe6,
ms.StoreMessageInfo(mi) 0x93, 0x51, 0x15, 0xef, 0xf3, 0xc0, 0x6e, 0x29, 0xd9, 0x9b, 0xc3, 0xa0, 0x43, 0x74, 0xc0, 0x57,
} 0x02, 0xc9, 0x33, 0x45, 0xfc, 0x02, 0xd6, 0xdf, 0x88, 0xde, 0xfc, 0x71, 0x0d, 0x63, 0x34, 0xab,
} 0xf5, 0x11, 0xd2, 0xbb, 0x44, 0x65, 0x83, 0x3e, 0x02, 0xbb, 0xa0, 0xb8, 0x1a, 0x3d, 0x5f, 0x09,
0x2c, 0xcf, 0x15, 0x36, 0xa3, 0x67, 0x9c, 0x16, 0xfb, 0xd8, 0x0c, 0x10, 0x3c, 0xc7, 0x89, 0x16,
func (x *RawNebulaEncryptedData) String() string { 0x7e, 0x96, 0xec, 0x8a, 0xe6, 0x2f, 0xce, 0xc6, 0xd1, 0xf3, 0x4c, 0x60, 0x49, 0x54, 0x7e, 0x48,
return protoimpl.X.MessageStringOf(x) 0x6a, 0x6a, 0x55, 0xdb, 0xf0, 0x04, 0x9d, 0x75, 0xc0, 0xff, 0x09, 0x24, 0xb1, 0x22, 0xc3, 0x02,
} 0xd8, 0x35, 0x2d, 0x6b, 0x1b, 0xb6, 0x5d, 0x00, 0xc7, 0xf3, 0x74, 0x58, 0x0e, 0x27, 0x3d, 0x25,
0xdf, 0x62, 0xd3, 0x9b, 0xf6, 0x15, 0xbe, 0x78, 0x31, 0xff, 0x67, 0x5c, 0xc0, 0xd4, 0xb7, 0xf2,
func (*RawNebulaEncryptedData) ProtoMessage() {} 0xbb, 0x2d, 0x2f, 0x05, 0x92, 0xa5, 0xca, 0x8c, 0xdf, 0x6d, 0xa7, 0x0e, 0x95, 0xf7, 0x11, 0x1c,
0x27, 0xc9, 0x9e, 0x9b, 0xa4, 0x9a, 0x3c, 0xed, 0xfe, 0xe1, 0x27, 0x00, 0x00, 0xff, 0xff, 0x2c,
func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message { 0xe3, 0x08, 0x37, 0x89, 0x01, 0x00, 0x00,
mi := &file_cert_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaEncryptedData.ProtoReflect.Descriptor instead.
func (*RawNebulaEncryptedData) Descriptor() ([]byte, []int) {
return file_cert_proto_rawDescGZIP(), []int{2}
}
func (x *RawNebulaEncryptedData) GetEncryptionMetadata() *RawNebulaEncryptionMetadata {
if x != nil {
return x.EncryptionMetadata
}
return nil
}
func (x *RawNebulaEncryptedData) GetCiphertext() []byte {
if x != nil {
return x.Ciphertext
}
return nil
}
type RawNebulaEncryptionMetadata struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=EncryptionAlgorithm,proto3" json:"EncryptionAlgorithm,omitempty"`
Argon2Parameters *RawNebulaArgon2Parameters `protobuf:"bytes,2,opt,name=Argon2Parameters,proto3" json:"Argon2Parameters,omitempty"`
}
func (x *RawNebulaEncryptionMetadata) Reset() {
*x = RawNebulaEncryptionMetadata{}
if protoimpl.UnsafeEnabled {
mi := &file_cert_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RawNebulaEncryptionMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RawNebulaEncryptionMetadata) ProtoMessage() {}
func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {
mi := &file_cert_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaEncryptionMetadata.ProtoReflect.Descriptor instead.
func (*RawNebulaEncryptionMetadata) Descriptor() ([]byte, []int) {
return file_cert_proto_rawDescGZIP(), []int{3}
}
func (x *RawNebulaEncryptionMetadata) GetEncryptionAlgorithm() string {
if x != nil {
return x.EncryptionAlgorithm
}
return ""
}
func (x *RawNebulaEncryptionMetadata) GetArgon2Parameters() *RawNebulaArgon2Parameters {
if x != nil {
return x.Argon2Parameters
}
return nil
}
type RawNebulaArgon2Parameters struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // rune in Go
Memory uint32 `protobuf:"varint,2,opt,name=memory,proto3" json:"memory,omitempty"`
Parallelism uint32 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"` // uint8 in Go
Iterations uint32 `protobuf:"varint,3,opt,name=iterations,proto3" json:"iterations,omitempty"`
Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"`
}
func (x *RawNebulaArgon2Parameters) Reset() {
*x = RawNebulaArgon2Parameters{}
if protoimpl.UnsafeEnabled {
mi := &file_cert_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RawNebulaArgon2Parameters) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RawNebulaArgon2Parameters) ProtoMessage() {}
func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {
mi := &file_cert_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaArgon2Parameters.ProtoReflect.Descriptor instead.
func (*RawNebulaArgon2Parameters) Descriptor() ([]byte, []int) {
return file_cert_proto_rawDescGZIP(), []int{4}
}
func (x *RawNebulaArgon2Parameters) GetVersion() int32 {
if x != nil {
return x.Version
}
return 0
}
func (x *RawNebulaArgon2Parameters) GetMemory() uint32 {
if x != nil {
return x.Memory
}
return 0
}
func (x *RawNebulaArgon2Parameters) GetParallelism() uint32 {
if x != nil {
return x.Parallelism
}
return 0
}
func (x *RawNebulaArgon2Parameters) GetIterations() uint32 {
if x != nil {
return x.Iterations
}
return 0
}
func (x *RawNebulaArgon2Parameters) GetSalt() []byte {
if x != nil {
return x.Salt
}
return nil
}
var File_cert_proto protoreflect.FileDescriptor
var file_cert_proto_rawDesc = []byte{
0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x65,
0x72, 0x74, 0x22, 0x71, 0x0a, 0x14, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x44, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x49, 0x70, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x53,
0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x53, 0x75,
0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18,
0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x0a,
0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4e,
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x4e,
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69,
0x63, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
0x72, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0e,
0x32, 0x0b, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x75, 0x72, 0x76, 0x65, 0x52, 0x05, 0x63,
0x75, 0x72, 0x76, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
0x51, 0x0a, 0x12, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72,
0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12,
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
0x78, 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61,
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
0x69, 0x74, 0x68, 0x6d, 0x12, 0x4b, 0x0a, 0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61,
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52,
0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
0x73, 0x22, 0xa3, 0x01, 0x0a, 0x19, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12,
0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d,
0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72,
0x79, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c,
0x69, 0x73, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x2a, 0x21, 0x0a, 0x05, 0x43, 0x75, 0x72, 0x76, 0x65,
0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x55, 0x52, 0x56, 0x45, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x00,
0x12, 0x08, 0x0a, 0x04, 0x50, 0x32, 0x35, 0x36, 0x10, 0x01, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71,
0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_cert_proto_rawDescOnce sync.Once
file_cert_proto_rawDescData = file_cert_proto_rawDesc
)
func file_cert_proto_rawDescGZIP() []byte {
file_cert_proto_rawDescOnce.Do(func() {
file_cert_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_proto_rawDescData)
})
return file_cert_proto_rawDescData
}
var file_cert_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_cert_proto_goTypes = []interface{}{
(Curve)(0), // 0: cert.Curve
(*RawNebulaCertificate)(nil), // 1: cert.RawNebulaCertificate
(*RawNebulaCertificateDetails)(nil), // 2: cert.RawNebulaCertificateDetails
(*RawNebulaEncryptedData)(nil), // 3: cert.RawNebulaEncryptedData
(*RawNebulaEncryptionMetadata)(nil), // 4: cert.RawNebulaEncryptionMetadata
(*RawNebulaArgon2Parameters)(nil), // 5: cert.RawNebulaArgon2Parameters
}
var file_cert_proto_depIdxs = []int32{
2, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
0, // 1: cert.RawNebulaCertificateDetails.curve:type_name -> cert.Curve
4, // 2: cert.RawNebulaEncryptedData.EncryptionMetadata:type_name -> cert.RawNebulaEncryptionMetadata
5, // 3: cert.RawNebulaEncryptionMetadata.Argon2Parameters:type_name -> cert.RawNebulaArgon2Parameters
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_cert_proto_init() }
func file_cert_proto_init() {
if File_cert_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_cert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaCertificate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaCertificateDetails); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaEncryptedData); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaEncryptionMetadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cert_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaArgon2Parameters); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_cert_proto_rawDesc,
NumEnums: 1,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_cert_proto_goTypes,
DependencyIndexes: file_cert_proto_depIdxs,
EnumInfos: file_cert_proto_enumTypes,
MessageInfos: file_cert_proto_msgTypes,
}.Build()
File_cert_proto = out.File
file_cert_proto_rawDesc = nil
file_cert_proto_goTypes = nil
file_cert_proto_depIdxs = nil
} }


@@ -1,15 +1,8 @@
syntax = "proto3"; syntax = "proto3";
package cert; package cert;
option go_package = "github.com/slackhq/nebula/cert";
//import "google/protobuf/timestamp.proto"; //import "google/protobuf/timestamp.proto";
enum Curve {
CURVE25519 = 0;
P256 = 1;
}
message RawNebulaCertificate { message RawNebulaCertificate {
RawNebulaCertificateDetails Details = 1; RawNebulaCertificateDetails Details = 1;
bytes Signature = 2; bytes Signature = 2;
@@ -31,24 +24,4 @@ message RawNebulaCertificateDetails {
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed // sha-256 of the issuer certificate, if this field is blank the cert is self-signed
bytes Issuer = 9; bytes Issuer = 9;
Curve curve = 100;
}
message RawNebulaEncryptedData {
RawNebulaEncryptionMetadata EncryptionMetadata = 1;
bytes Ciphertext = 2;
}
message RawNebulaEncryptionMetadata {
string EncryptionAlgorithm = 1;
RawNebulaArgon2Parameters Argon2Parameters = 2;
}
message RawNebulaArgon2Parameters {
int32 version = 1; // rune in Go
uint32 memory = 2;
uint32 parallelism = 4; // uint8 in Go
uint32 iterations = 3;
bytes salt = 5;
} }
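For orientation (editorial), the envelope these removed messages describe, written out as the generated Go structs from earlier in this diff; salt and ciphertext are placeholders and 19 is the current Argon2 version (0x13).

encrypted := &RawNebulaEncryptedData{
	EncryptionMetadata: &RawNebulaEncryptionMetadata{
		EncryptionAlgorithm: "AES-256-GCM",
		Argon2Parameters: &RawNebulaArgon2Parameters{
			Version:     19,
			Memory:      64 * 1024,
			Parallelism: 4,
			Iterations:  3,
			Salt:        salt,
		},
	},
	Ciphertext: ciphertext,
}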


@@ -1,9 +1,6 @@
package cert package cert
import ( import (
"crypto/ecdh"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand" "crypto/rand"
"fmt" "fmt"
"io" "io"
@@ -11,11 +8,11 @@ import (
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/test" "github.com/golang/protobuf/proto"
"github.com/slackhq/nebula/util"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519" "golang.org/x/crypto/ed25519"
"google.golang.org/protobuf/proto"
) )
func TestMarshalingNebulaCertificate(t *testing.T) { func TestMarshalingNebulaCertificate(t *testing.T) {
@@ -104,49 +101,7 @@ func TestNebulaCertificate_Sign(t *testing.T) {
pub, priv, err := ed25519.GenerateKey(rand.Reader) pub, priv, err := ed25519.GenerateKey(rand.Reader)
assert.Nil(t, err) assert.Nil(t, err)
assert.False(t, nc.CheckSignature(pub)) assert.False(t, nc.CheckSignature(pub))
assert.Nil(t, nc.Sign(Curve_CURVE25519, priv)) assert.Nil(t, nc.Sign(priv))
assert.True(t, nc.CheckSignature(pub))
_, err = nc.Marshal()
assert.Nil(t, err)
//t.Log("Cert size:", len(b))
}
func TestNebulaCertificate_SignP256(t *testing.T) {
before := time.Now().Add(time.Second * -60).Round(time.Second)
after := time.Now().Add(time.Second * 60).Round(time.Second)
pubKey := []byte("01234567890abcedfghij1234567890ab1234567890abcedfghij1234567890ab")
nc := NebulaCertificate{
Details: NebulaCertificateDetails{
Name: "testing",
Ips: []*net.IPNet{
{IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
{IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
{IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
},
Subnets: []*net.IPNet{
{IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))},
{IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))},
{IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))},
},
Groups: []string{"test-group1", "test-group2", "test-group3"},
NotBefore: before,
NotAfter: after,
PublicKey: pubKey,
IsCA: false,
Curve: Curve_P256,
Issuer: "1234567890abcedfghij1234567890ab",
},
}
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
rawPriv := priv.D.FillBytes(make([]byte, 32))
assert.Nil(t, err)
assert.False(t, nc.CheckSignature(pub))
assert.Nil(t, nc.Sign(Curve_P256, rawPriv))
assert.True(t, nc.CheckSignature(pub)) assert.True(t, nc.CheckSignature(pub))
_, err = nc.Marshal() _, err = nc.Marshal()
@@ -198,7 +153,7 @@ func TestNebulaCertificate_MarshalJSON(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal( assert.Equal(
t, t,
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}", "{\"details\":{\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}",
string(b), string(b),
) )
} }
@@ -222,7 +177,7 @@ func TestNebulaCertificate_Verify(t *testing.T) {
v, err := c.Verify(time.Now(), caPool) v, err := c.Verify(time.Now(), caPool)
assert.False(t, v) assert.False(t, v)
assert.EqualError(t, err, "certificate is in the block list") assert.EqualError(t, err, "certificate has been blocked")
caPool.ResetCertBlocklist() caPool.ResetCertBlocklist()
v, err = c.Verify(time.Now(), caPool) v, err = c.Verify(time.Now(), caPool)
@@ -262,65 +217,6 @@ func TestNebulaCertificate_Verify(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
} }
func TestNebulaCertificate_VerifyP256(t *testing.T) {
ca, _, caKey, err := newTestCaCertP256(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
h, err := ca.Sha256Sum()
assert.Nil(t, err)
caPool := NewCAPool()
caPool.CAs[h] = ca
f, err := c.Sha256Sum()
assert.Nil(t, err)
caPool.BlocklistFingerprint(f)
v, err := c.Verify(time.Now(), caPool)
assert.False(t, v)
assert.EqualError(t, err, "certificate is in the block list")
caPool.ResetCertBlocklist()
v, err = c.Verify(time.Now(), caPool)
assert.True(t, v)
assert.Nil(t, err)
v, err = c.Verify(time.Now().Add(time.Hour*1000), caPool)
assert.False(t, v)
assert.EqualError(t, err, "root certificate is expired")
c, _, _, err = newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
v, err = c.Verify(time.Now().Add(time.Minute*6), caPool)
assert.False(t, v)
assert.EqualError(t, err, "certificate is expired")
// Test group assertion
ca, _, caKey, err = newTestCaCertP256(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "test2"})
assert.Nil(t, err)
caPem, err := ca.MarshalToPEM()
assert.Nil(t, err)
caPool = NewCAPool()
caPool.AddCACertificate(caPem)
c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "bad"})
assert.Nil(t, err)
v, err = c.Verify(time.Now(), caPool)
assert.False(t, v)
assert.EqualError(t, err, "certificate contained a group not present on the signing ca: bad")
c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1"})
assert.Nil(t, err)
v, err = c.Verify(time.Now(), caPool)
assert.True(t, v)
assert.Nil(t, err)
}
func TestNebulaCertificate_Verify_IPs(t *testing.T) { func TestNebulaCertificate_Verify_IPs(t *testing.T) {
_, caIp1, _ := net.ParseCIDR("10.0.0.0/16") _, caIp1, _ := net.ParseCIDR("10.0.0.0/16")
_, caIp2, _ := net.ParseCIDR("192.168.0.0/24") _, caIp2, _ := net.ParseCIDR("192.168.0.0/24")
@@ -479,43 +375,16 @@ func TestNebulaCertificate_Verify_Subnets(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
} }
func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) { func TestNebulaVerifyPrivateKey(t *testing.T) {
ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err) assert.Nil(t, err)
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
assert.Nil(t, err)
_, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
assert.NotNil(t, err)
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
err = c.VerifyPrivateKey(Curve_CURVE25519, priv) err = c.VerifyPrivateKey(priv)
assert.Nil(t, err) assert.Nil(t, err)
_, priv2 := x25519Keypair() _, priv2 := x25519Keypair()
err = c.VerifyPrivateKey(Curve_CURVE25519, priv2) err = c.VerifyPrivateKey(priv2)
assert.NotNil(t, err)
}
func TestNebulaCertificate_VerifyPrivateKeyP256(t *testing.T) {
ca, _, caKey, err := newTestCaCertP256(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
err = ca.VerifyPrivateKey(Curve_P256, caKey)
assert.Nil(t, err)
_, _, caKey2, err := newTestCaCertP256(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
err = ca.VerifyPrivateKey(Curve_P256, caKey2)
assert.NotNil(t, err)
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
err = c.VerifyPrivateKey(Curve_P256, priv)
assert.Nil(t, err)
_, priv2 := p256Keypair()
err = c.VerifyPrivateKey(Curve_P256, priv2)
assert.NotNil(t, err) assert.NotNil(t, err)
} }
@@ -553,25 +422,6 @@ BVG+oJpAoqokUBbI4U0N8CSfpUABEkB/Pm5A2xyH/nc8mg/wvGUWG3pZ7nHzaDMf
8/phAUt+FLzqTECzQKisYswKvE3pl9mbEYKbOdIHrxdIp95mo4sF 8/phAUt+FLzqTECzQKisYswKvE3pl9mbEYKbOdIHrxdIp95mo4sF
-----END NEBULA CERTIFICATE----- -----END NEBULA CERTIFICATE-----
`
expired := `
# expired certificate
-----BEGIN NEBULA CERTIFICATE-----
CjkKB2V4cGlyZWQouPmWjQYwufmWjQY6ILCRaoCkJlqHgv5jfDN4lzLHBvDzaQm4
vZxfu144hmgjQAESQG4qlnZi8DncvD/LDZnLgJHOaX1DWCHHEh59epVsC+BNgTie
WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
-----END NEBULA CERTIFICATE-----
`
p256 := `
# p256 certificate
-----BEGIN NEBULA CERTIFICATE-----
CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
6tctO9sPT3jOx8ES6M1nIqOhpTmZeabF/4rELDqPV4aH5jfJut798DUXql0FlF8H
76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
-----END NEBULA CERTIFICATE-----
` `
rootCA := NebulaCertificate{ rootCA := NebulaCertificate{
@@ -586,12 +436,6 @@ IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
}, },
} }
rootCAP256 := NebulaCertificate{
Details: NebulaCertificateDetails{
Name: "nebula P256 test",
},
}
p, err := NewCAPoolFromBytes([]byte(noNewLines)) p, err := NewCAPoolFromBytes([]byte(noNewLines))
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
@@ -601,400 +445,6 @@ IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
// expired cert, no valid certs
ppp, err := NewCAPoolFromBytes([]byte(expired))
assert.Equal(t, ErrExpired, err)
assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
// expired cert, with valid certs
pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
assert.Equal(t, ErrExpired, err)
assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
assert.Equal(t, len(pppp.CAs), 3)
ppppp, err := NewCAPoolFromBytes([]byte(p256))
assert.Nil(t, err)
assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
assert.Equal(t, len(ppppp.CAs), 1)
}
func appendByteSlices(b ...[]byte) []byte {
retSlice := []byte{}
for _, v := range b {
retSlice = append(retSlice, v...)
}
return retSlice
}
func TestUnmrshalCertPEM(t *testing.T) {
goodCert := []byte(`
# A good cert
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-----END NEBULA CERTIFICATE-----
`)
badBanner := []byte(`# A bad banner
-----BEGIN NOT A NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-----END NOT A NEBULA CERTIFICATE-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-END NEBULA CERTIFICATE----`)
certBundle := appendByteSlices(goodCert, badBanner, invalidPem)
// Success test case
cert, rest, err := UnmarshalNebulaCertificateFromPEM(certBundle)
assert.NotNil(t, cert)
assert.Equal(t, rest, append(badBanner, invalidPem...))
assert.Nil(t, err)
// Fail due to invalid banner.
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest)
assert.Nil(t, cert)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "bytes did not contain a proper nebula certificate banner")
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest)
assert.Nil(t, cert)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalSigningPrivateKey(t *testing.T) {
privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA ED25519 PRIVATE KEY-----
`)
privP256Key := []byte(`# A good key
-----BEGIN NEBULA ECDSA P256 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ECDSA P256 PRIVATE KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-----END NEBULA ED25519 PRIVATE KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NOT A NEBULA PRIVATE KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-END NEBULA ED25519 PRIVATE KEY-----`)
keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, curve, err := UnmarshalSigningPrivateKey(keyBundle)
assert.Len(t, k, 64)
assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
assert.Equal(t, Curve_CURVE25519, curve)
assert.Nil(t, err)
// Success test case
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
assert.Len(t, k, 32)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
assert.Equal(t, Curve_P256, curve)
assert.Nil(t, err)
// Fail due to short key
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key")
// Fail due to invalid banner
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519/ECDSA private key banner")
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, curve, err = UnmarshalSigningPrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
passphrase := []byte("DO NOT USE THIS KEY")
privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
qrlJ69wer3ZUHFXA
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
shortKey := []byte(`# A key which, once decrypted, is too short
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
rQr3bdH3Oy/WiYU=
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
invalidBanner := []byte(`# Invalid banner (not encrypted)
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
bWRp2CTVFhW9HD/qCd28ltDgK3w8VXSeaEYczDWos8sMUBqDb9jP3+NYwcS4lURG
XgLvodMXZJuaFPssp+WwtA==
-----END NEBULA ED25519 PRIVATE KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
qrlJ69wer3ZUHFXA
-END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
// Success test case
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, keyBundle)
assert.Nil(t, err)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Len(t, k, 64)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
// Fail due to short key
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
// Fail due to invalid banner
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
assert.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
// Fail due to invalid passphrase
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey)
assert.EqualError(t, err, "invalid passphrase or corrupt private key")
assert.Nil(t, k)
assert.Equal(t, rest, []byte{})
}
func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) {
// Having proved that decryption works correctly above, we can test the
// encryption function produces a value which can be decrypted
passphrase := []byte("passphrase")
bytes := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
kdfParams := NewArgon2Parameters(64*1024, 4, 3)
key, err := EncryptAndMarshalSigningPrivateKey(Curve_CURVE25519, bytes, passphrase, kdfParams)
assert.Nil(t, err)
// Verify the "key" can be decrypted successfully
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, key)
assert.Len(t, k, 64)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, []byte{})
assert.Nil(t, err)
// EncryptAndMarshalEd25519PrivateKey does not create any errors itself
}
func TestUnmarshalPrivateKey(t *testing.T) {
privKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA X25519 PRIVATE KEY-----
`)
privP256Key := []byte(`# A good key
-----BEGIN NEBULA P256 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PRIVATE KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA X25519 PRIVATE KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PRIVATE KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PRIVATE KEY-----`)
keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, curve, err := UnmarshalPrivateKey(keyBundle)
assert.Len(t, k, 32)
assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
assert.Equal(t, Curve_CURVE25519, curve)
assert.Nil(t, err)
// Success test case
k, rest, curve, err = UnmarshalPrivateKey(rest)
assert.Len(t, k, 32)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
assert.Equal(t, Curve_P256, curve)
assert.Nil(t, err)
// Fail due to short key
k, rest, curve, err = UnmarshalPrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key")
// Fail due to invalid banner
k, rest, curve, err = UnmarshalPrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "bytes did not contain a proper nebula private key banner")
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, curve, err = UnmarshalPrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalEd25519PublicKey(t *testing.T) {
pubKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ED25519 PUBLIC KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA ED25519 PUBLIC KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PUBLIC KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA ED25519 PUBLIC KEY-----`)
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, err := UnmarshalEd25519PublicKey(keyBundle)
assert.Equal(t, len(k), 32)
assert.Nil(t, err)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
// Fail due to short key
k, rest, err = UnmarshalEd25519PublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 32 bytes, is invalid ed25519 public key")
// Fail due to invalid banner
k, rest, err = UnmarshalEd25519PublicKey(rest)
assert.Nil(t, k)
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 public key banner")
assert.Equal(t, rest, invalidPem)
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, err = UnmarshalEd25519PublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalX25519PublicKey(t *testing.T) {
pubKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA X25519 PUBLIC KEY-----
`)
pubP256Key := []byte(`# A good key
-----BEGIN NEBULA P256 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PUBLIC KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA X25519 PUBLIC KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PUBLIC KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PUBLIC KEY-----`)
keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, curve, err := UnmarshalPublicKey(keyBundle)
assert.Equal(t, len(k), 32)
assert.Nil(t, err)
assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
assert.Equal(t, Curve_CURVE25519, curve)
// Success test case
k, rest, curve, err = UnmarshalPublicKey(rest)
assert.Equal(t, len(k), 65)
assert.Nil(t, err)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
assert.Equal(t, Curve_P256, curve)
// Fail due to short key
k, rest, curve, err = UnmarshalPublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")
// Fail due to invalid banner
k, rest, curve, err = UnmarshalPublicKey(rest)
assert.Nil(t, k)
assert.EqualError(t, err, "bytes did not contain a proper nebula public key banner")
assert.Equal(t, rest, invalidPem)
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, curve, err = UnmarshalPublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
} }
// Ensure that upgrading the protobuf library does not change how certificates // Ensure that upgrading the protobuf library does not change how certificates
@@ -1046,14 +496,7 @@ func TestNebulaCertificate_Copy(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
cc := c.Copy() cc := c.Copy()
test.AssertDeepCopyEqual(t, c, cc) util.AssertDeepCopyEqual(t, c, cc)
}
func TestUnmarshalNebulaCertificate(t *testing.T) {
// Test that we don't panic with an invalid certificate (#332)
data := []byte("\x98\x00\x00")
_, err := UnmarshalNebulaCertificate(data)
assert.EqualError(t, err, "encoded Details was nil")
} }
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) { func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
@@ -1088,56 +531,13 @@ func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []
nc.Details.Groups = groups nc.Details.Groups = groups
} }
err = nc.Sign(Curve_CURVE25519, priv) err = nc.Sign(priv)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
return nc, pub, priv, nil return nc, pub, priv, nil
} }
func newTestCaCertP256(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
rawPriv := priv.D.FillBytes(make([]byte, 32))
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &NebulaCertificate{
Details: NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
Curve: Curve_P256,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = ips
}
if len(subnets) > 0 {
nc.Details.Subnets = subnets
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(Curve_P256, rawPriv)
if err != nil {
return nil, nil, nil, err
}
return nc, pub, rawPriv, nil
}
func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) { func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
issuer, err := ca.Sha256Sum() issuer, err := ca.Sha256Sum()
if err != nil { if err != nil {
@@ -1171,16 +571,7 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
} }
} }
var pub, rawPriv []byte pub, rawPriv := x25519Keypair()
switch ca.Details.Curve {
case Curve_CURVE25519:
pub, rawPriv = x25519Keypair()
case Curve_P256:
pub, rawPriv = p256Keypair()
default:
return nil, nil, nil, fmt.Errorf("unknown curve: %v", ca.Details.Curve)
}
nc := &NebulaCertificate{ nc := &NebulaCertificate{
Details: NebulaCertificateDetails{ Details: NebulaCertificateDetails{
@@ -1192,13 +583,12 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
NotAfter: time.Unix(after.Unix(), 0), NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub, PublicKey: pub,
IsCA: false, IsCA: false,
Curve: ca.Details.Curve,
Issuer: issuer, Issuer: issuer,
InvertedGroups: make(map[string]struct{}), InvertedGroups: make(map[string]struct{}),
}, },
} }
err = nc.Sign(ca.Details.Curve, key) err = nc.Sign(key)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -1207,24 +597,10 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
} }
func x25519Keypair() ([]byte, []byte) { func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32) var pubkey, privkey [32]byte
if _, err := io.ReadFull(rand.Reader, privkey); err != nil { if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil {
panic(err) panic(err)
} }
curve25519.ScalarBaseMult(&pubkey, &privkey)
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint) return pubkey[:], privkey[:]
if err != nil {
panic(err)
}
return pubkey, privkey
}
func p256Keypair() ([]byte, []byte) {
privkey, err := ecdh.P256().GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
pubkey := privkey.PublicKey()
return pubkey.Bytes(), privkey.Bytes()
} }

View File

@@ -1,140 +0,0 @@
package cert
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"golang.org/x/crypto/argon2"
)
// KDF factors
type Argon2Parameters struct {
version rune
Memory uint32 // KiB
Parallelism uint8
Iterations uint32
salt []byte
}
// Returns a new Argon2Parameters object with current version set
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
return &Argon2Parameters{
version: argon2.Version,
Memory: memory, // KiB
Parallelism: parallelism,
Iterations: iterations,
}
}
// Encrypts data using AES-256-GCM and the Argon2id key derivation function
func aes256Encrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
key, err := aes256DeriveKey(passphrase, kdfParams)
if err != nil {
return nil, err
}
// this should never happen, but since this dictates how our calls into the
// aes package behave and could be catastrophic, let's sanity check this
if len(key) != 32 {
return nil, fmt.Errorf("invalid AES-256 key length (%d) - cowardly refusing to encrypt", len(key))
}
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
return nil, err
}
ciphertext := gcm.Seal(nil, nonce, data, nil)
blob := joinNonceCiphertext(nonce, ciphertext)
return blob, nil
}
// Decrypts data using AES-256-GCM and the Argon2id key derivation function
// Expects the data to include an Argon2id parameter string before the encrypted data
func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
key, err := aes256DeriveKey(passphrase, kdfParams)
if err != nil {
return nil, err
}
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
if err != nil {
return nil, err
}
plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
if err != nil {
return nil, fmt.Errorf("invalid passphrase or corrupt private key")
}
return plaintext, nil
}
func aes256DeriveKey(passphrase []byte, params *Argon2Parameters) ([]byte, error) {
if params.salt == nil {
params.salt = make([]byte, 32)
if _, err := rand.Read(params.salt); err != nil {
return nil, err
}
}
// keySize of 32 bytes will result in AES-256 encryption
key, err := deriveKey(passphrase, 32, params)
if err != nil {
return nil, err
}
return key, nil
}
// Derives a key from a passphrase using Argon2id
func deriveKey(passphrase []byte, keySize uint32, params *Argon2Parameters) ([]byte, error) {
if params.version != argon2.Version {
return nil, fmt.Errorf("incompatible Argon2 version: %d", params.version)
}
if params.salt == nil {
return nil, fmt.Errorf("salt must be set in argon2Parameters")
} else if len(params.salt) < 16 {
return nil, fmt.Errorf("salt must be at least 128 bits")
}
key := argon2.IDKey(passphrase, params.salt, params.Iterations, params.Memory, params.Parallelism, keySize)
return key, nil
}
// Prepends nonce to ciphertext
func joinNonceCiphertext(nonce []byte, ciphertext []byte) []byte {
return append(nonce, ciphertext...)
}
// Splits nonce from ciphertext
func splitNonceCiphertext(blob []byte, nonceSize int) ([]byte, []byte, error) {
if len(blob) <= nonceSize {
return nil, nil, fmt.Errorf("invalid ciphertext blob - blob shorter than nonce length")
}
return blob[:nonceSize], blob[nonceSize:], nil
}
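Taken together, these helpers form a simple encrypt/decrypt round trip: derive a key with Argon2id, seal with AES-256-GCM, prepend the nonce, and reverse the steps to decrypt. A minimal in-package sketch, with illustrative (not recommended) parameter values:
// roundTripExample is an illustrative sketch only: it encrypts a secret with a
// passphrase and immediately decrypts it using the same Argon2 parameters.
func roundTripExample() error {
	passphrase := []byte("example passphrase")
	secret := []byte("example plaintext")
	params := NewArgon2Parameters(64*1024, 4, 3) // memory KiB, parallelism, iterations
	blob, err := aes256Encrypt(passphrase, params, secret)
	if err != nil {
		return err
	}
	// aes256Encrypt populated params.salt, so the same params decrypt the blob
	plaintext, err := aes256Decrypt(passphrase, params, blob)
	if err != nil {
		return err
	}
	if string(plaintext) != string(secret) {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}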

View File

@@ -1,25 +0,0 @@
package cert
import (
"testing"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/argon2"
)
func TestNewArgon2Parameters(t *testing.T) {
p := NewArgon2Parameters(64*1024, 4, 3)
assert.EqualValues(t, &Argon2Parameters{
version: argon2.Version,
Memory: 64 * 1024,
Parallelism: 4,
Iterations: 3,
}, p)
p = NewArgon2Parameters(2*1024*1024, 2, 1)
assert.EqualValues(t, &Argon2Parameters{
version: argon2.Version,
Memory: 2 * 1024 * 1024,
Parallelism: 2,
Iterations: 1,
}, p)
}

View File

@@ -1,14 +0,0 @@
package cert
import (
"errors"
)
var (
ErrRootExpired = errors.New("root certificate is expired")
ErrExpired = errors.New("certificate is expired")
ErrNotCA = errors.New("certificate is not a CA")
ErrNotSelfSigned = errors.New("certificate is not self-signed")
ErrBlockListed = errors.New("certificate is in the block list")
ErrSignatureMismatch = errors.New("certificate signature did not match")
)
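Callers are meant to compare against these sentinel values rather than matching error strings. A hedged sketch of branching on them (classifyVerifyError is a hypothetical caller; only the errors themselves come from this package):
// classifyVerifyError is a hypothetical example of mapping the sentinel
// errors above to a coarse outcome for logging or metrics.
func classifyVerifyError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, ErrRootExpired), errors.Is(err, ErrExpired):
		return "expired"
	case errors.Is(err, ErrBlockListed):
		return "blocked"
	case errors.Is(err, ErrSignatureMismatch):
		return "bad signature"
	default:
		return "invalid"
	}
}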

View File

@@ -1,10 +0,0 @@
package cidr
import "net"
// Parse is a convenience function that returns only the IPNet
// This function ignores errors since it is primarily a test helper, the result could be nil
func Parse(s string) *net.IPNet {
_, c, _ := net.ParseCIDR(s)
return c
}

View File

@@ -1,167 +0,0 @@
package cidr
import (
"net"
"testing"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
)
func TestCIDRTree_List(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/16"), "1")
tree.AddCIDR(Parse("1.0.0.0/8"), "2")
tree.AddCIDR(Parse("1.0.0.0/16"), "3")
tree.AddCIDR(Parse("1.0.0.0/16"), "4")
list := tree.List()
assert.Len(t, list, 2)
assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
assert.Equal(t, "2", *list[0].Value)
assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
assert.Equal(t, "4", *list[1].Value)
}
func TestCIDRTree_Contains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_Match(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
tree.AddCIDR(Parse("4.1.1.1/32"), "1b")
tests := []struct {
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
}
func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
}

View File

@@ -1,185 +0,0 @@
package cidr
import (
"net"
"github.com/slackhq/nebula/iputil"
)
const startbit6 = uint64(1 << 63)
type Tree6 struct {
root4 *Node
root6 *Node
}
func NewTree6() *Tree6 {
tree := new(Tree6)
tree.root4 = &Node{}
tree.root6 = &Node{}
return tree
}
func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
var node, next *Node
cidrIP, ipv4 := isIPV4(cidr.IP)
if ipv4 {
node = tree.root4
next = tree.root4
} else {
node = tree.root6
next = tree.root6
}
for i := 0; i < len(cidrIP); i += 4 {
ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
bit := startbit
// Find our last ancestor in the tree
for bit&mask != 0 {
if ip&bit != 0 {
next = node.right
} else {
next = node.left
}
if next == nil {
break
}
bit = bit >> 1
node = next
}
// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next.parent = node
if ip&bit != 0 {
node.right = next
} else {
node.left = next
}
bit >>= 1
node = next
}
}
// Final node marks our cidr, set the value
node.value = val
}
// Finds the most specific match
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
var node *Node
wholeIP, ipv4 := isIPV4(ip)
if ipv4 {
node = tree.root4
} else {
node = tree.root6
}
for i := 0; i < len(wholeIP); i += 4 {
ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
bit := startbit
for node != nil {
if node.value != nil {
value = node.value
}
if bit == 0 {
break
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
}
return value
}
func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root4
for node != nil {
if node.value != nil {
value = node.value
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
return value
}
func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
ip := hi
node := tree.root6
for i := 0; i < 2; i++ {
bit := startbit6
for node != nil {
if node.value != nil {
value = node.value
}
if bit == 0 {
break
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
ip = lo
}
return value
}
func isIPV4(ip net.IP) (net.IP, bool) {
if len(ip) == net.IPv4len {
return ip, true
}
if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
return ip[12:16], true
}
return ip, false
}
func isZeros(p net.IP) bool {
for i := 0; i < len(p); i++ {
if p[i] != 0 {
return false
}
}
return true
}
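One subtlety worth noting: isIPV4 treats IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) as IPv4, so lookups for them land in root4 rather than root6. A small sketch of that behavior (mappedLookupExample is illustrative only):
// mappedLookupExample is illustrative: the mapped address parses to a 16-byte
// net.IP, which isIPV4 normalizes back to 4 bytes, so the lookup hits the
// IPv4 tree even though the input is written in IPv6 form.
func mappedLookupExample() interface{} {
	tree := NewTree6()
	tree.AddCIDR(Parse("10.0.0.0/8"), "ten-net")
	return tree.MostSpecificContains(net.ParseIP("::ffff:10.0.0.1")) // "ten-net"
}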

View File

@@ -1,81 +0,0 @@
package cidr
import (
"encoding/binary"
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
}
tree = NewTree6()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
tree.AddCIDR(Parse("::/0"), "cool6")
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
}
func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Result interface{}
IP string
}{
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
}
for _, tt := range tests {
ip := net.ParseIP(tt.IP)
hi := binary.BigEndian.Uint64(ip[:8])
lo := binary.BigEndian.Uint64(ip[8:])
assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
}
}

View File

@@ -1,46 +1,39 @@
package cidr package nebula
import ( import (
"encoding/binary"
"fmt"
"net" "net"
"github.com/slackhq/nebula/iputil"
) )
type Node struct { type CIDRNode struct {
left *Node left *CIDRNode
right *Node right *CIDRNode
parent *Node parent *CIDRNode
value interface{} value interface{}
} }
type entry struct { type CIDRTree struct {
CIDR *net.IPNet root *CIDRNode
Value *interface{}
}
type Tree4 struct {
root *Node
list []entry
} }
const ( const (
startbit = iputil.VpnIp(0x80000000) startbit = uint32(0x80000000)
) )
func NewTree4() *Tree4 { func NewCIDRTree() *CIDRTree {
tree := new(Tree4) tree := new(CIDRTree)
tree.root = &Node{} tree.root = &CIDRNode{}
tree.list = []entry{}
return tree return tree
} }
func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) { func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
next := tree.root next := tree.root
ip := iputil.Ip2VpnIp(cidr.IP) ip := ip2int(cidr.IP)
mask := iputil.Ip2VpnIp(cidr.Mask) mask := ip2int(cidr.Mask)
// Find our last ancestor in the tree // Find our last ancestor in the tree
for bit&mask != 0 { for bit&mask != 0 {
@@ -60,22 +53,13 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
// We already have this range so update the value // We already have this range so update the value
if next != nil { if next != nil {
addCIDR := cidr.String()
for i, v := range tree.list {
if addCIDR == v.CIDR.String() {
tree.list = append(tree.list[:i], tree.list[i+1:]...)
break
}
}
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
node.value = val node.value = val
return return
} }
// Build up the rest of the tree we don't already have // Build up the rest of the tree we don't already have
for bit&mask != 0 { for bit&mask != 0 {
next = &Node{} next = &CIDRNode{}
next.parent = node next.parent = node
if ip&bit != 0 { if ip&bit != 0 {
@@ -90,11 +74,10 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
// Final node marks our cidr, set the value // Final node marks our cidr, set the value
node.value = val node.value = val
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
} }
// Contains finds the first match, which may be the least specific // Finds the first match, which way be the least specific
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) { func (tree *CIDRTree) Contains(ip uint32) (value interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
@@ -116,8 +99,8 @@ func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
return value return value
} }
// MostSpecificContains finds the most specific match // Finds the most specific match
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) { func (tree *CIDRTree) MostSpecificContains(ip uint32) (value interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
@@ -133,13 +116,14 @@ func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
} }
bit >>= 1 bit >>= 1
} }
return value return value
} }
// Match finds the most specific match // Finds the most specific match
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) { func (tree *CIDRTree) Match(ip uint32) (value interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
lastNode := node lastNode := node
@@ -161,7 +145,26 @@ func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
return value return value
} }
// List will return all CIDRs and their current values. Do not modify the contents! // A helper type to avoid converting to IP when logging
func (tree *Tree4) List() []entry { type IntIp uint32
return tree.list
func (ip IntIp) String() string {
return fmt.Sprintf("%v", int2ip(uint32(ip)))
}
func (ip IntIp) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", int2ip(uint32(ip)).String())), nil
}
func ip2int(ip []byte) uint32 {
if len(ip) == 16 {
return binary.BigEndian.Uint32(ip[12:16])
}
return binary.BigEndian.Uint32(ip)
}
func int2ip(nn uint32) net.IP {
ip := make(net.IP, 4)
binary.BigEndian.PutUint32(ip, nn)
return ip
} }
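The IntIp helper on the added side exists so that log fields can carry a packed uint32 yet still render as a dotted quad. A quick hedged sketch of the round trip (intIpExample is illustrative only):
// intIpExample is illustrative: ip2int packs a dotted quad into a uint32,
// int2ip reverses it, and IntIp formats the packed value as an address.
func intIpExample() string {
	n := ip2int(net.ParseIP("192.0.2.1")) // packs to 0xC0000201
	_ = int2ip(n)                         // back to net.IP 192.0.2.1
	return fmt.Sprintf("%s", IntIp(n))    // renders "192.0.2.1"
}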

157
cidr_radix_test.go Normal file
View File

@@ -0,0 +1,157 @@
package nebula
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCIDRTree_Contains(t *testing.T) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.0.0.0/8"), "1")
tree.AddCIDR(getCIDR("2.1.0.0/16"), "2")
tree.AddCIDR(getCIDR("3.1.1.0/24"), "3")
tree.AddCIDR(getCIDR("4.1.1.0/24"), "4a")
tree.AddCIDR(getCIDR("4.1.1.1/32"), "4b")
tree.AddCIDR(getCIDR("4.1.2.1/32"), "4c")
tree.AddCIDR(getCIDR("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(ip2int(net.ParseIP(tt.IP))))
}
tree = NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.0.0.0/8"), "1")
tree.AddCIDR(getCIDR("2.1.0.0/16"), "2")
tree.AddCIDR(getCIDR("3.1.1.0/24"), "3")
tree.AddCIDR(getCIDR("4.1.1.0/24"), "4a")
tree.AddCIDR(getCIDR("4.1.1.0/30"), "4b")
tree.AddCIDR(getCIDR("4.1.1.1/32"), "4c")
tree.AddCIDR(getCIDR("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(ip2int(net.ParseIP(tt.IP))))
}
tree = NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(ip2int(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(ip2int(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_Match(t *testing.T) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("4.1.1.0/32"), "1a")
tree.AddCIDR(getCIDR("4.1.1.1/32"), "1b")
tests := []struct {
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(ip2int(net.ParseIP(tt.IP))))
}
tree = NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("255.255.255.255"))))
}
func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.0.0/16"), "1")
tree.AddCIDR(getCIDR("1.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("192.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("172.2.1.1/32"), "1")
ip := ip2int(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
ip = ip2int(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
}
func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.0.0/16"), "1")
tree.AddCIDR(getCIDR("1.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("192.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("172.2.1.1/32"), "1")
ip := ip2int(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
ip = ip2int(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
}
func getCIDR(s string) *net.IPNet {
_, c, _ := net.ParseCIDR(s)
return c
}

View File

@@ -1,20 +1,16 @@
package main package main
import ( import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand" "crypto/rand"
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math"
"net" "net"
"os" "os"
"strings" "strings"
"time" "time"
"github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"golang.org/x/crypto/ed25519" "golang.org/x/crypto/ed25519"
) )
@@ -25,16 +21,9 @@ type caFlags struct {
duration *time.Duration duration *time.Duration
outKeyPath *string outKeyPath *string
outCertPath *string outCertPath *string
outQRPath *string
groups *string groups *string
ips *string ips *string
subnets *string subnets *string
argonMemory *uint
argonIterations *uint
argonParallelism *uint
encryption *bool
curve *string
} }
func newCaFlags() *caFlags { func newCaFlags() *caFlags {
@@ -44,33 +33,13 @@ func newCaFlags() *caFlags {
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"") cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to") cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to") cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use") cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses") cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use")
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets") cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use")
cf.argonMemory = cf.set.Uint("argon-memory", 2*1024*1024, "Optional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase")
cf.argonParallelism = cf.set.Uint("argon-parallelism", 4, "Optional: Argon2 parallelism parameter used for encrypted private key passphrase")
cf.argonIterations = cf.set.Uint("argon-iterations", 1, "Optional: Argon2 iterations parameter used for encrypted private key passphrase")
cf.encryption = cf.set.Bool("encrypt", false, "Optional: prompt for passphrase and write out-key in an encrypted format")
cf.curve = cf.set.String("curve", "25519", "EdDSA/ECDSA Curve (25519, P256)")
return &cf return &cf
} }
func parseArgonParameters(memory uint, parallelism uint, iterations uint) (*cert.Argon2Parameters, error) { func ca(args []string, out io.Writer, errOut io.Writer) error {
if memory <= 0 || memory > math.MaxUint32 {
return nil, newHelpErrorf("-argon-memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
}
if parallelism <= 0 || parallelism > math.MaxUint8 {
return nil, newHelpErrorf("-argon-parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
}
if iterations <= 0 || iterations > math.MaxUint32 {
return nil, newHelpErrorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
}
return cert.NewArgon2Parameters(uint32(memory), uint8(parallelism), uint32(iterations)), nil
}
func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
cf := newCaFlags() cf := newCaFlags()
err := cf.set.Parse(args) err := cf.set.Parse(args)
if err != nil { if err != nil {
@@ -86,12 +55,6 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
if err := mustFlagString("out-crt", cf.outCertPath); err != nil { if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
return err return err
} }
var kdfParams *cert.Argon2Parameters
if *cf.encryption {
if kdfParams, err = parseArgonParameters(*cf.argonMemory, *cf.argonParallelism, *cf.argonIterations); err != nil {
return err
}
}
if *cf.duration <= 0 { if *cf.duration <= 0 {
return &helpError{"-duration must be greater than 0"} return &helpError{"-duration must be greater than 0"}
@@ -116,9 +79,6 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
if err != nil { if err != nil {
return newHelpErrorf("invalid ip definition: %s", err) return newHelpErrorf("invalid ip definition: %s", err)
} }
if ip.To4() == nil {
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", rs)
}
ipNet.IP = ip ipNet.IP = ip
ips = append(ips, ipNet) ips = append(ips, ipNet)
@@ -135,56 +95,15 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
if err != nil { if err != nil {
return newHelpErrorf("invalid subnet definition: %s", err) return newHelpErrorf("invalid subnet definition: %s", err)
} }
if s.IP.To4() == nil {
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
}
subnets = append(subnets, s) subnets = append(subnets, s)
} }
} }
} }
var passphrase []byte pub, rawPriv, err := ed25519.GenerateKey(rand.Reader)
if *cf.encryption {
for i := 0; i < 5; i++ {
out.Write([]byte("Enter passphrase: "))
passphrase, err = pr.ReadPassword()
if err == ErrNoTerminal {
return fmt.Errorf("out-key must be encrypted interactively")
} else if err != nil {
return fmt.Errorf("error reading passphrase: %s", err)
}
if len(passphrase) > 0 {
break
}
}
if len(passphrase) == 0 {
return fmt.Errorf("no passphrase specified, remove -encrypt flag to write out-key in plaintext")
}
}
var curve cert.Curve
var pub, rawPriv []byte
switch *cf.curve {
case "25519", "X25519", "Curve25519", "CURVE25519":
curve = cert.Curve_CURVE25519
pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
if err != nil { if err != nil {
return fmt.Errorf("error while generating ed25519 keys: %s", err) return fmt.Errorf("error while generating ed25519 keys: %s", err)
} }
case "P256":
var key *ecdsa.PrivateKey
curve = cert.Curve_P256
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return fmt.Errorf("error while generating ecdsa keys: %s", err)
}
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
rawPriv = key.D.FillBytes(make([]byte, 32))
pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)
}
nc := cert.NebulaCertificate{ nc := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
@@ -196,7 +115,6 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
NotAfter: time.Now().Add(*cf.duration), NotAfter: time.Now().Add(*cf.duration),
PublicKey: pub, PublicKey: pub,
IsCA: true, IsCA: true,
Curve: curve,
}, },
} }
@@ -208,22 +126,12 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath) return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath)
} }
err = nc.Sign(curve, rawPriv) err = nc.Sign(rawPriv)
if err != nil { if err != nil {
return fmt.Errorf("error while signing: %s", err) return fmt.Errorf("error while signing: %s", err)
} }
if *cf.encryption { err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalEd25519PrivateKey(rawPriv), 0600)
b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
if err != nil {
return fmt.Errorf("error while encrypting out-key: %s", err)
}
err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
} else {
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600)
}
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-key: %s", err) return fmt.Errorf("error while writing out-key: %s", err)
} }
@@ -238,18 +146,6 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while writing out-crt: %s", err) return fmt.Errorf("error while writing out-crt: %s", err)
} }
if *cf.outQRPath != "" {
b, err = qrcode.Encode(string(b), qrcode.Medium, -5)
if err != nil {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
}
return nil return nil
} }

View File

@@ -1,15 +1,11 @@
//go:build !windows
// +build !windows // +build !windows
package main package main
import ( import (
"bytes" "bytes"
"encoding/pem"
"errors"
"io/ioutil" "io/ioutil"
"os" "os"
"strings"
"testing" "testing"
"time" "time"
@@ -29,32 +25,20 @@ func Test_caHelp(t *testing.T) {
assert.Equal( assert.Equal(
t, t,
"Usage of "+os.Args[0]+" ca <flags>: create a self signed certificate authority\n"+ "Usage of "+os.Args[0]+" ca <flags>: create a self signed certificate authority\n"+
" -argon-iterations uint\n"+
" \tOptional: Argon2 iterations parameter used for encrypted private key passphrase (default 1)\n"+
" -argon-memory uint\n"+
" \tOptional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase (default 2097152)\n"+
" -argon-parallelism uint\n"+
" \tOptional: Argon2 parallelism parameter used for encrypted private key passphrase (default 4)\n"+
" -curve string\n"+
" \tEdDSA/ECDSA Curve (25519, P256) (default \"25519\")\n"+
" -duration duration\n"+ " -duration duration\n"+
" \tOptional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\" (default 8760h0m0s)\n"+ " \tOptional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\" (default 8760h0m0s)\n"+
" -encrypt\n"+
" \tOptional: prompt for passphrase and write out-key in an encrypted format\n"+
" -groups string\n"+ " -groups string\n"+
" \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+ " \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
" -ips string\n"+ " -ips string\n"+
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses\n"+ " \tOptional: comma separated list of ip and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use\n"+
" -name string\n"+ " -name string\n"+
" \tRequired: name of the certificate authority\n"+ " \tRequired: name of the certificate authority\n"+
" -out-crt string\n"+ " -out-crt string\n"+
" \tOptional: path to write the certificate to (default \"ca.crt\")\n"+ " \tOptional: path to write the certificate to (default \"ca.crt\")\n"+
" -out-key string\n"+ " -out-key string\n"+
" \tOptional: path to write the private key to (default \"ca.key\")\n"+ " \tOptional: path to write the private key to (default \"ca.key\")\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
" -subnets string\n"+ " -subnets string\n"+
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets\n", " \tOptional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use\n",
ob.String(), ob.String(),
) )
} }
@@ -63,38 +47,8 @@ func Test_ca(t *testing.T) {
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
eb := &bytes.Buffer{} eb := &bytes.Buffer{}
nopw := &StubPasswordReader{
password: []byte(""),
err: nil,
}
errpw := &StubPasswordReader{
password: []byte(""),
err: errors.New("stub error"),
}
passphrase := []byte("DO NOT USE THIS KEY")
testpw := &StubPasswordReader{
password: passphrase,
err: nil,
}
pwPromptOb := "Enter passphrase: "
// required args // required args
assertHelpError(t, ca( assertHelpError(t, ca([]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb), "-name is required")
[]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
), "-name is required")
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
// ipv4 only ips
assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
// ipv4 only subnets
assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
@@ -102,7 +56,7 @@ func Test_ca(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"} args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError) assert.EqualError(t, ca(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
@@ -115,7 +69,7 @@ func Test_ca(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()} args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError) assert.EqualError(t, ca(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
@@ -129,7 +83,7 @@ func Test_ca(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.Nil(t, ca(args, ob, eb, nopw)) assert.Nil(t, ca(args, ob, eb))
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
@@ -155,67 +109,19 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", lCrt.Details.Issuer) assert.Equal(t, "", lCrt.Details.Issuer)
assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey)) assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey))
// test encrypted key
os.Remove(keyF.Name())
os.Remove(crtF.Name())
ob.Reset()
eb.Reset()
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.Nil(t, ca(args, ob, eb, testpw))
assert.Equal(t, pwPromptOb, ob.String())
assert.Equal(t, "", eb.String())
// read encrypted key file and verify default params
rb, _ = ioutil.ReadFile(keyF.Name())
k, _ := pem.Decode(rb)
ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
assert.Nil(t, err)
// we won't know salt in advance, so just check start of string
assert.Equal(t, uint32(2*1024*1024), ned.EncryptionMetadata.Argon2Parameters.Memory)
assert.Equal(t, uint8(4), ned.EncryptionMetadata.Argon2Parameters.Parallelism)
assert.Equal(t, uint32(1), ned.EncryptionMetadata.Argon2Parameters.Iterations)
// verify the key is valid and decrypt-able
var curve cert.Curve
curve, lKey, b, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rb)
assert.Equal(t, cert.Curve_CURVE25519, curve)
assert.Nil(t, err)
assert.Len(t, b, 0)
assert.Len(t, lKey, 64)
// test when reading passsword results in an error
os.Remove(keyF.Name())
os.Remove(crtF.Name())
ob.Reset()
eb.Reset()
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.Error(t, ca(args, ob, eb, errpw))
assert.Equal(t, pwPromptOb, ob.String())
assert.Equal(t, "", eb.String())
// test when user fails to enter a password
os.Remove(keyF.Name())
os.Remove(crtF.Name())
ob.Reset()
eb.Reset()
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
assert.Equal(t, strings.Repeat(pwPromptOb, 5), ob.String()) // prompts 5 times before giving up
assert.Equal(t, "", eb.String())
// create valid cert/key for overwrite tests // create valid cert/key for overwrite tests
os.Remove(keyF.Name()) os.Remove(keyF.Name())
os.Remove(crtF.Name()) os.Remove(crtF.Name())
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.Nil(t, ca(args, ob, eb, nopw)) assert.Nil(t, ca(args, ob, eb))
// test that we won't overwrite existing certificate file // test that we won't overwrite existing certificate file
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name()) assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA key: "+keyF.Name())
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
@@ -224,7 +130,7 @@ func Test_ca(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name()) assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA cert: "+crtF.Name())
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
os.Remove(keyF.Name()) os.Remove(keyF.Name())

View File

@@ -14,8 +14,6 @@ type keygenFlags struct {
set *flag.FlagSet set *flag.FlagSet
outKeyPath *string outKeyPath *string
outPubPath *string outPubPath *string
curve *string
} }
func newKeygenFlags() *keygenFlags { func newKeygenFlags() *keygenFlags {
@@ -23,7 +21,6 @@ func newKeygenFlags() *keygenFlags {
cf.set.Usage = func() {} cf.set.Usage = func() {}
cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to") cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to")
cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to") cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to")
cf.curve = cf.set.String("curve", "25519", "ECDH Curve (25519, P256)")
return &cf return &cf
} }
@@ -41,25 +38,14 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
return err return err
} }
var pub, rawPriv []byte pub, rawPriv := x25519Keypair()
var curve cert.Curve
switch *cf.curve {
case "25519", "X25519", "Curve25519", "CURVE25519":
pub, rawPriv = x25519Keypair()
curve = cert.Curve_CURVE25519
case "P256":
pub, rawPriv = p256Keypair()
curve = cert.Curve_P256
default:
return fmt.Errorf("invalid curve: %s", *cf.curve)
}
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600) err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-key: %s", err) return fmt.Errorf("error while writing out-key: %s", err)
} }
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600) err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalX25519PublicKey(pub), 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-pub: %s", err) return fmt.Errorf("error while writing out-pub: %s", err)
} }

View File

@@ -22,8 +22,6 @@ func Test_keygenHelp(t *testing.T) {
assert.Equal( assert.Equal(
t, t,
"Usage of "+os.Args[0]+" keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`\n"+ "Usage of "+os.Args[0]+" keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`\n"+
" -curve string\n"+
" \tECDH Curve (25519, P256) (default \"25519\")\n"+
" -out-key string\n"+ " -out-key string\n"+
" \tRequired: path to write the private key to\n"+ " \tRequired: path to write the private key to\n"+
" -out-pub string\n"+ " -out-pub string\n"+

@@ -62,11 +62,11 @@ func main() {
switch args[0] { switch args[0] {
case "ca": case "ca":
err = ca(args[1:], os.Stdout, os.Stderr, StdinPasswordReader{}) err = ca(args[1:], os.Stdout, os.Stderr)
case "keygen": case "keygen":
err = keygen(args[1:], os.Stdout, os.Stderr) err = keygen(args[1:], os.Stdout, os.Stderr)
case "sign": case "sign":
err = signCert(args[1:], os.Stdout, os.Stderr, StdinPasswordReader{}) err = signCert(args[1:], os.Stdout, os.Stderr)
case "print": case "print":
err = printCert(args[1:], os.Stdout, os.Stderr) err = printCert(args[1:], os.Stdout, os.Stderr)
case "verify": case "verify":
@@ -127,8 +127,6 @@ func help(err string, out io.Writer) {
fmt.Fprintln(out, " "+signSummary()) fmt.Fprintln(out, " "+signSummary())
fmt.Fprintln(out, " "+printSummary()) fmt.Fprintln(out, " "+printSummary())
fmt.Fprintln(out, " "+verifySummary()) fmt.Fprintln(out, " "+verifySummary())
fmt.Fprintln(out, "")
fmt.Fprintf(out, " To see usage for a given mode, use %s <mode> -h\n", os.Args[0])
} }
func mustFlagString(name string, val *string) error { func mustFlagString(name string, val *string) error {

@@ -22,9 +22,7 @@ func Test_help(t *testing.T) {
" " + keygenSummary() + "\n" + " " + keygenSummary() + "\n" +
" " + signSummary() + "\n" + " " + signSummary() + "\n" +
" " + printSummary() + "\n" + " " + printSummary() + "\n" +
" " + verifySummary() + "\n" + " " + verifySummary() + "\n"
"\n" +
" To see usage for a given mode, use " + os.Args[0] + " <mode> -h\n"
ob := &bytes.Buffer{} ob := &bytes.Buffer{}

@@ -1,28 +0,0 @@
package main
import (
"errors"
"fmt"
"os"
"golang.org/x/term"
)
var ErrNoTerminal = errors.New("cannot read password from nonexistent terminal")
type PasswordReader interface {
ReadPassword() ([]byte, error)
}
type StdinPasswordReader struct{}
func (pr StdinPasswordReader) ReadPassword() ([]byte, error) {
if !term.IsTerminal(int(os.Stdin.Fd())) {
return nil, ErrNoTerminal
}
password, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Println()
return password, err
}

@@ -1,10 +0,0 @@
package main
type StubPasswordReader struct {
password []byte
err error
}
func (pr *StubPasswordReader) ReadPassword() ([]byte, error) {
return pr.password, pr.err
}
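
These two small files exist so that anything which prompts for a passphrase depends on the PasswordReader interface rather than reading the terminal directly, letting tests swap in StubPasswordReader. A minimal sketch of that pattern, assuming the same package as the files above (the helper names promptPassphrase and examplePromptUsage are illustrative, not from the changeset):

package main

import (
    "io"
    "io/ioutil"
    "os"
)

// promptPassphrase depends only on the interface: production code passes
// StdinPasswordReader{}, tests pass a *StubPasswordReader with a canned
// password or error, so no real terminal is needed.
func promptPassphrase(out io.Writer, pr PasswordReader) ([]byte, error) {
    out.Write([]byte("Enter passphrase: "))
    return pr.ReadPassword()
}

func examplePromptUsage() {
    // Interactive: reads from the real terminal (returns ErrNoTerminal otherwise).
    _, _ = promptPassphrase(os.Stdout, StdinPasswordReader{})

    // Test: returns the canned value without touching the terminal.
    _, _ = promptPassphrase(ioutil.Discard, &StubPasswordReader{password: []byte("DO NOT USE THIS KEY")})
}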

@@ -9,14 +9,12 @@ import (
"os" "os"
"strings" "strings"
"github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
) )
type printFlags struct { type printFlags struct {
set *flag.FlagSet set *flag.FlagSet
json *bool json *bool
outQRPath *string
path *string path *string
} }
@@ -24,7 +22,6 @@ func newPrintFlags() *printFlags {
pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)} pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)}
pf.set.Usage = func() {} pf.set.Usage = func() {}
pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format") pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format")
pf.outQRPath = pf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
pf.path = pf.set.String("path", "", "Required: path to the certificate") pf.path = pf.set.String("path", "", "Required: path to the certificate")
return &pf return &pf
@@ -47,8 +44,6 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
} }
var c *cert.NebulaCertificate var c *cert.NebulaCertificate
var qrBytes []byte
part := 0
for { for {
c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert) c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert)
@@ -66,31 +61,9 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
out.Write([]byte("\n")) out.Write([]byte("\n"))
} }
if *pf.outQRPath != "" {
b, err := c.MarshalToPEM()
if err != nil {
return fmt.Errorf("error while marshalling cert to PEM: %s", err)
}
qrBytes = append(qrBytes, b...)
}
if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" { if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" {
break break
} }
part++
}
if *pf.outQRPath != "" {
b, err := qrcode.Encode(string(qrBytes), qrcode.Medium, -5)
if err != nil {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
} }
return nil return nil
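
The -out-qr branch that appears in only one side of the hunks above can be read in isolation as the following sketch, assuming the go-qrcode package shown in the import hunk (the helper name writeQRCode is illustrative):

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/skip2/go-qrcode"
)

// writeQRCode renders the concatenated PEM bytes of every printed certificate
// into one PNG. A negative size asks go-qrcode to pick the image dimensions,
// drawing each QR module at |size| pixels.
func writeQRCode(pemBytes []byte, outPath string) error {
    png, err := qrcode.Encode(string(pemBytes), qrcode.Medium, -5)
    if err != nil {
        return fmt.Errorf("error while generating qr code: %s", err)
    }
    if err := ioutil.WriteFile(outPath, png, 0600); err != nil {
        return fmt.Errorf("error while writing out-qr: %s", err)
    }
    return nil
}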

@@ -23,8 +23,6 @@ func Test_printHelp(t *testing.T) {
"Usage of "+os.Args[0]+" print <flags>: prints details about a certificate\n"+ "Usage of "+os.Args[0]+" print <flags>: prints details about a certificate\n"+
" -json\n"+ " -json\n"+
" \tOptional: outputs certificates in json format\n"+ " \tOptional: outputs certificates in json format\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
" -path string\n"+ " -path string\n"+
" \tRequired: path to the certificate\n", " \tRequired: path to the certificate\n",
ob.String(), ob.String(),
@@ -87,7 +85,7 @@ func Test_printCert(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal( assert.Equal(
t, t,
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n", "NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
ob.String(), ob.String(),
) )
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
@@ -115,7 +113,7 @@ func Test_printCert(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal( assert.Equal(
t, t,
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n", "{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
ob.String(), ob.String(),
) )
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())

@@ -1,7 +1,6 @@
package main package main
import ( import (
"crypto/ecdh"
"crypto/rand" "crypto/rand"
"flag" "flag"
"fmt" "fmt"
@@ -12,7 +11,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
) )
@@ -27,7 +25,6 @@ type signFlags struct {
inPubPath *string inPubPath *string
outKeyPath *string outKeyPath *string
outCertPath *string outCertPath *string
outQRPath *string
groups *string groups *string
subnets *string subnets *string
} }
@@ -38,19 +35,18 @@ func newSignFlags() *signFlags {
sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key") sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key")
sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert") sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert")
sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname") sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname")
sf.ip = sf.set.String("ip", "", "Required: ipv4 address and network in CIDR notation to assign the cert") sf.ip = sf.set.String("ip", "", "Required: ip and network in CIDR notation to assign the cert")
sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"") sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key") sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key")
sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to") sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to")
sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to") sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to")
sf.outQRPath = sf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups") sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups")
sf.subnets = sf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for") sf.subnets = sf.set.String("subnets", "", "Optional: comma seperated list of subnet this cert can serve for")
return &sf return &sf
} }
func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error { func signCert(args []string, out io.Writer, errOut io.Writer) error {
sf := newSignFlags() sf := newSignFlags()
err := sf.set.Parse(args) err := sf.set.Parse(args)
if err != nil { if err != nil {
@@ -78,37 +74,8 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while reading ca-key: %s", err) return fmt.Errorf("error while reading ca-key: %s", err)
} }
var curve cert.Curve caKey, _, err := cert.UnmarshalEd25519PrivateKey(rawCAKey)
var caKey []byte
// naively attempt to decode the private key as though it is not encrypted
caKey, _, curve, err = cert.UnmarshalSigningPrivateKey(rawCAKey)
if err == cert.ErrPrivateKeyEncrypted {
// ask for a passphrase until we get one
var passphrase []byte
for i := 0; i < 5; i++ {
out.Write([]byte("Enter passphrase: "))
passphrase, err = pr.ReadPassword()
if err == ErrNoTerminal {
return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
} else if err != nil {
return fmt.Errorf("error reading password: %s", err)
}
if len(passphrase) > 0 {
break
}
}
if len(passphrase) == 0 {
return fmt.Errorf("cannot open encrypted ca-key without passphrase")
}
curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
if err != nil { if err != nil {
return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
}
} else if err != nil {
return fmt.Errorf("error while parsing ca-key: %s", err) return fmt.Errorf("error while parsing ca-key: %s", err)
} }
@@ -122,10 +89,6 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while parsing ca-crt: %s", err) return fmt.Errorf("error while parsing ca-crt: %s", err)
} }
if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
return fmt.Errorf("refusing to sign, root certificate does not match private key")
}
issuer, err := caCert.Sha256Sum() issuer, err := caCert.Sha256Sum()
if err != nil { if err != nil {
return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err) return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err)
@@ -144,9 +107,6 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
if err != nil { if err != nil {
return newHelpErrorf("invalid ip definition: %s", err) return newHelpErrorf("invalid ip definition: %s", err)
} }
if ip.To4() == nil {
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", *sf.ip)
}
ipNet.IP = ip ipNet.IP = ip
groups := []string{} groups := []string{}
@@ -168,9 +128,6 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
if err != nil { if err != nil {
return newHelpErrorf("invalid subnet definition: %s", err) return newHelpErrorf("invalid subnet definition: %s", err)
} }
if s.IP.To4() == nil {
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
}
subnets = append(subnets, s) subnets = append(subnets, s)
} }
} }
@@ -182,16 +139,12 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
if err != nil { if err != nil {
return fmt.Errorf("error while reading in-pub: %s", err) return fmt.Errorf("error while reading in-pub: %s", err)
} }
var pubCurve cert.Curve pub, _, err = cert.UnmarshalX25519PublicKey(rawPub)
pub, _, pubCurve, err = cert.UnmarshalPublicKey(rawPub)
if err != nil { if err != nil {
return fmt.Errorf("error while parsing in-pub: %s", err) return fmt.Errorf("error while parsing in-pub: %s", err)
} }
if pubCurve != curve {
return fmt.Errorf("curve of in-pub does not match ca")
}
} else { } else {
pub, rawPriv = newKeypair(curve) pub, rawPriv = x25519Keypair()
} }
nc := cert.NebulaCertificate{ nc := cert.NebulaCertificate{
@@ -205,7 +158,6 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
PublicKey: pub, PublicKey: pub,
IsCA: false, IsCA: false,
Issuer: issuer, Issuer: issuer,
Curve: curve,
}, },
} }
@@ -225,7 +177,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath) return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath)
} }
err = nc.Sign(curve, caKey) err = nc.Sign(caKey)
if err != nil { if err != nil {
return fmt.Errorf("error while signing: %s", err) return fmt.Errorf("error while signing: %s", err)
} }
@@ -235,7 +187,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath) return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
} }
err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600) err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-key: %s", err) return fmt.Errorf("error while writing out-key: %s", err)
} }
@@ -251,53 +203,16 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while writing out-crt: %s", err) return fmt.Errorf("error while writing out-crt: %s", err)
} }
if *sf.outQRPath != "" {
b, err = qrcode.Encode(string(b), qrcode.Medium, -5)
if err != nil {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
}
return nil return nil
} }
func newKeypair(curve cert.Curve) ([]byte, []byte) {
switch curve {
case cert.Curve_CURVE25519:
return x25519Keypair()
case cert.Curve_P256:
return p256Keypair()
default:
return nil, nil
}
}
func x25519Keypair() ([]byte, []byte) { func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32) var pubkey, privkey [32]byte
if _, err := io.ReadFull(rand.Reader, privkey); err != nil { if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil {
panic(err) panic(err)
} }
curve25519.ScalarBaseMult(&pubkey, &privkey)
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint) return pubkey[:], privkey[:]
if err != nil {
panic(err)
}
return pubkey, privkey
}
func p256Keypair() ([]byte, []byte) {
privkey, err := ecdh.P256().GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
pubkey := privkey.PublicKey()
return pubkey.Bytes(), privkey.Bytes()
} }
func signSummary() string { func signSummary() string {
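
The most tangled part of the sign.go hunk is the encrypted CA key handling in the signCert variant that takes a PasswordReader. Read on its own, the flow is roughly the sketch below; it assumes the PasswordReader interface from passwords.go, condenses the error handling (the ErrNoTerminal special case is folded into the generic read error), and the helper name loadCAKey is illustrative rather than part of the changeset.

package main

import (
    "fmt"
    "io"

    "github.com/slackhq/nebula/cert"
)

// loadCAKey first tries to parse the CA key as plaintext. If the PEM block is
// encrypted it prompts (up to five times) for a non-empty passphrase and then
// decrypts, returning the curve so the new certificate can be verified against
// and signed with the same algorithm.
func loadCAKey(rawCAKey []byte, out io.Writer, pr PasswordReader) (cert.Curve, []byte, error) {
    caKey, _, curve, err := cert.UnmarshalSigningPrivateKey(rawCAKey)
    if err == nil {
        return curve, caKey, nil
    }
    if err != cert.ErrPrivateKeyEncrypted {
        return curve, nil, fmt.Errorf("error while parsing ca-key: %s", err)
    }

    var passphrase []byte
    for i := 0; i < 5; i++ {
        out.Write([]byte("Enter passphrase: "))
        if passphrase, err = pr.ReadPassword(); err != nil {
            return curve, nil, fmt.Errorf("error reading password: %s", err)
        }
        if len(passphrase) > 0 {
            break
        }
    }
    if len(passphrase) == 0 {
        return curve, nil, fmt.Errorf("cannot open encrypted ca-key without passphrase")
    }

    curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
    if err != nil {
        return curve, nil, fmt.Errorf("error while parsing encrypted ca-key: %s", err)
    }
    return curve, caKey, nil
}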

@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows // +build !windows
package main package main
@@ -6,7 +5,6 @@ package main
import ( import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"errors"
"io/ioutil" "io/ioutil"
"os" "os"
"testing" "testing"
@@ -40,17 +38,15 @@ func Test_signHelp(t *testing.T) {
" -in-pub string\n"+ " -in-pub string\n"+
" \tOptional (if out-key not set): path to read a previously generated public key\n"+ " \tOptional (if out-key not set): path to read a previously generated public key\n"+
" -ip string\n"+ " -ip string\n"+
" \tRequired: ipv4 address and network in CIDR notation to assign the cert\n"+ " \tRequired: ip and network in CIDR notation to assign the cert\n"+
" -name string\n"+ " -name string\n"+
" \tRequired: name of the cert, usually a hostname\n"+ " \tRequired: name of the cert, usually a hostname\n"+
" -out-crt string\n"+ " -out-crt string\n"+
" \tOptional: path to write the certificate to\n"+ " \tOptional: path to write the certificate to\n"+
" -out-key string\n"+ " -out-key string\n"+
" \tOptional (if in-pub not set): path to write the private key to\n"+ " \tOptional (if in-pub not set): path to write the private key to\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
" -subnets string\n"+ " -subnets string\n"+
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for\n", " \tOptional: comma seperated list of subnet this cert can serve for\n",
ob.String(), ob.String(),
) )
} }
@@ -59,39 +55,18 @@ func Test_signCert(t *testing.T) {
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
eb := &bytes.Buffer{} eb := &bytes.Buffer{}
nopw := &StubPasswordReader{
password: []byte(""),
err: nil,
}
errpw := &StubPasswordReader{
password: []byte(""),
err: errors.New("stub error"),
}
passphrase := []byte("DO NOT USE THIS KEY")
testpw := &StubPasswordReader{
password: passphrase,
err: nil,
}
// required args // required args
assertHelpError(t, signCert(
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw, assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-name is required")
), "-name is required")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
assertHelpError(t, signCert( assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-ip is required")
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
), "-ip is required")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
// cannot set -in-pub and -out-key // cannot set -in-pub and -out-key
assertHelpError(t, signCert( assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb), "cannot set both -in-pub and -out-key")
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
), "cannot set both -in-pub and -out-key")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -99,7 +74,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError) assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-key: open ./nope: "+NoSuchFileError)
// failed to unmarshal key // failed to unmarshal key
ob.Reset() ob.Reset()
@@ -109,7 +84,7 @@ func Test_signCert(t *testing.T) {
defer os.Remove(caKeyF.Name()) defer os.Remove(caKeyF.Name())
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block") assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-key: input did not contain a valid PEM encoded block")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -121,7 +96,7 @@ func Test_signCert(t *testing.T) {
// failed to read cert // failed to read cert
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError) assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -133,7 +108,7 @@ func Test_signCert(t *testing.T) {
defer os.Remove(caCrtF.Name()) defer os.Remove(caCrtF.Name())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block") assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -152,7 +127,7 @@ func Test_signCert(t *testing.T) {
// failed to read pub // failed to read pub
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError) assert.EqualError(t, signCert(args, ob, eb), "error while reading in-pub: open ./nope: "+NoSuchFileError)
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -164,7 +139,7 @@ func Test_signCert(t *testing.T) {
defer os.Remove(inPubF.Name()) defer os.Remove(inPubF.Name())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block") assert.EqualError(t, signCert(args, ob, eb), "error while parsing in-pub: input did not contain a valid PEM encoded block")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -178,14 +153,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: invalid CIDR address: a1.1.1.1/24") assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -193,28 +161,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: invalid CIDR address: a") assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: invalid CIDR address: a")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
// mismatched ca key
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
assert.Nil(t, err)
defer os.Remove(caKeyF2.Name())
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -222,7 +169,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError) assert.EqualError(t, signCert(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -235,7 +182,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError) assert.EqualError(t, signCert(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
os.Remove(keyF.Name()) os.Remove(keyF.Name())
@@ -249,7 +196,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, nopw)) assert.Nil(t, signCert(args, ob, eb))
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -291,7 +238,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
assert.Nil(t, signCert(args, ob, eb, nopw)) assert.Nil(t, signCert(args, ob, eb))
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -306,7 +253,7 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate") assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -314,14 +261,14 @@ func Test_signCert(t *testing.T) {
os.Remove(keyF.Name()) os.Remove(keyF.Name())
os.Remove(crtF.Name()) os.Remove(crtF.Name())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, nopw)) assert.Nil(t, signCert(args, ob, eb))
// test that we won't overwrite existing key file // test that we won't overwrite existing key file
os.Remove(crtF.Name()) os.Remove(crtF.Name())
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name()) assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing key: "+keyF.Name())
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -329,83 +276,15 @@ func Test_signCert(t *testing.T) {
os.Remove(keyF.Name()) os.Remove(keyF.Name())
os.Remove(crtF.Name()) os.Remove(crtF.Name())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, nopw)) assert.Nil(t, signCert(args, ob, eb))
// test that we won't overwrite existing certificate file // test that we won't overwrite existing certificate file
os.Remove(keyF.Name()) os.Remove(keyF.Name())
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name()) assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name())
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
// create valid cert/key using encrypted CA key
os.Remove(caKeyF.Name())
os.Remove(caCrtF.Name())
os.Remove(keyF.Name())
os.Remove(crtF.Name())
ob.Reset()
eb.Reset()
caKeyF, err = ioutil.TempFile("", "sign-cert.key")
assert.Nil(t, err)
defer os.Remove(caKeyF.Name())
caCrtF, err = ioutil.TempFile("", "sign-cert.crt")
assert.Nil(t, err)
defer os.Remove(caCrtF.Name())
// generate the encrypted key
caPub, caPriv, _ = ed25519.GenerateKey(rand.Reader)
kdfParams := cert.NewArgon2Parameters(64*1024, 4, 3)
b, _ = cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, caPriv, passphrase, kdfParams)
caKeyF.Write(b)
ca = cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "ca",
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Minute * 200),
PublicKey: caPub,
IsCA: true,
},
}
b, _ = ca.MarshalToPEM()
caCrtF.Write(b)
// test with the proper password
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, testpw))
assert.Equal(t, "Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())
// test with the wrong password
ob.Reset()
eb.Reset()
testpw.password = []byte("invalid password")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Error(t, signCert(args, ob, eb, testpw))
assert.Equal(t, "Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())
// test with the user not entering a password
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Error(t, signCert(args, ob, eb, nopw))
// normally the user hitting enter on the prompt would add newlines between these
assert.Equal(t, "Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())
// test an error condition
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Error(t, signCert(args, ob, eb, errpw))
assert.Equal(t, "Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())
} }

View File

@@ -72,12 +72,12 @@ func Test_verify(t *testing.T) {
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
Name: "test-ca", Name: "test-ca",
NotBefore: time.Now().Add(time.Hour * -1), NotBefore: time.Now().Add(time.Hour * -1),
NotAfter: time.Now().Add(time.Hour * 2), NotAfter: time.Now().Add(time.Hour),
PublicKey: caPub, PublicKey: caPub,
IsCA: true, IsCA: true,
}, },
} }
ca.Sign(cert.Curve_CURVE25519, caPriv) ca.Sign(caPriv)
b, _ := ca.MarshalToPEM() b, _ := ca.MarshalToPEM()
caFile.Truncate(0) caFile.Truncate(0)
caFile.Seek(0, 0) caFile.Seek(0, 0)
@@ -117,7 +117,7 @@ func Test_verify(t *testing.T) {
}, },
} }
crt.Sign(cert.Curve_CURVE25519, badPriv) crt.Sign(badPriv)
b, _ = crt.MarshalToPEM() b, _ = crt.MarshalToPEM()
certFile.Truncate(0) certFile.Truncate(0)
certFile.Seek(0, 0) certFile.Seek(0, 0)
@@ -129,7 +129,7 @@ func Test_verify(t *testing.T) {
assert.EqualError(t, err, "certificate signature did not match") assert.EqualError(t, err, "certificate signature did not match")
// verified cert at path // verified cert at path
crt.Sign(cert.Curve_CURVE25519, caPriv) crt.Sign(caPriv)
b, _ = crt.MarshalToPEM() b, _ = crt.MarshalToPEM()
certFile.Truncate(0) certFile.Truncate(0)
certFile.Seek(0, 0) certFile.Seek(0, 0)
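
Both test files above build their CA fixture the same way; stripped of the diff interleaving, the pattern (using the Sign signature that takes an explicit curve constant) looks roughly like this. The helper name newTestCA and the use of the standard crypto/ed25519 package are assumptions for illustration.

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "time"

    "github.com/slackhq/nebula/cert"
)

// newTestCA generates an ed25519 CA key, describes the CA in
// NebulaCertificateDetails, signs it and PEM-encodes the result,
// mirroring the fixture used by the tests above.
func newTestCA() ([]byte, ed25519.PrivateKey, error) {
    caPub, caPriv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        return nil, nil, err
    }
    ca := cert.NebulaCertificate{
        Details: cert.NebulaCertificateDetails{
            Name:      "test-ca",
            NotBefore: time.Now().Add(time.Hour * -1),
            NotAfter:  time.Now().Add(time.Hour * 2),
            PublicKey: caPub,
            IsCA:      true,
        },
    }
    if err := ca.Sign(cert.Curve_CURVE25519, caPriv); err != nil {
        return nil, nil, err
    }
    pem, err := ca.MarshalToPEM()
    return pem, caPriv, err
}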

@@ -1,10 +0,0 @@
//go:build !windows
// +build !windows
package main
import "github.com/sirupsen/logrus"
func HookLogger(l *logrus.Logger) {
// Do nothing, let the logs flow to stdout/stderr
}

@@ -1,54 +0,0 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/kardianos/service"
"github.com/sirupsen/logrus"
)
// HookLogger routes the logrus logs through the service logger so that they end up in the Windows Event Viewer
// logrus output will be discarded
func HookLogger(l *logrus.Logger) {
l.AddHook(newLogHook(logger))
l.SetOutput(ioutil.Discard)
}
type logHook struct {
sl service.Logger
}
func newLogHook(sl service.Logger) *logHook {
return &logHook{sl: sl}
}
func (h *logHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return h.sl.Error(line)
case logrus.FatalLevel:
return h.sl.Error(line)
case logrus.ErrorLevel:
return h.sl.Error(line)
case logrus.WarnLevel:
return h.sl.Warning(line)
case logrus.InfoLevel:
return h.sl.Info(line)
case logrus.DebugLevel:
return h.sl.Info(line)
default:
return nil
}
}
func (h *logHook) Levels() []logrus.Level {
return logrus.AllLevels
}

@@ -7,8 +7,6 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/util"
) )
// A version string that can be set with // A version string that can be set with
@@ -48,20 +46,19 @@ func main() {
os.Exit(1) os.Exit(1)
} }
l := logrus.New() config := nebula.NewConfig()
l.Out = os.Stdout err := config.Load(*configPath)
c := config.NewC(l)
err := c.Load(*configPath)
if err != nil { if err != nil {
fmt.Printf("failed to load config: %s", err) fmt.Printf("failed to load config: %s", err)
os.Exit(1) os.Exit(1)
} }
ctrl, err := nebula.Main(c, *configTest, Build, l, nil) l := logrus.New()
l.Out = os.Stdout
c, err := nebula.Main(config, *configTest, Build, l, nil)
switch v := err.(type) { switch v := err.(type) {
case util.ContextualError: case nebula.ContextualError:
v.Log(l) v.Log(l)
os.Exit(1) os.Exit(1)
case error: case error:
@@ -70,8 +67,8 @@ func main() {
} }
if !*configTest { if !*configTest {
ctrl.Start() c.Start()
ctrl.ShutdownBlock() c.ShutdownBlock()
} }
os.Exit(0) os.Exit(0)

@@ -9,7 +9,6 @@ import (
"github.com/kardianos/service" "github.com/kardianos/service"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
) )
var logger service.Logger var logger service.Logger
@@ -25,16 +24,15 @@ func (p *program) Start(s service.Service) error {
// Start should not block. // Start should not block.
logger.Info("Nebula service starting.") logger.Info("Nebula service starting.")
l := logrus.New() config := nebula.NewConfig()
HookLogger(l) err := config.Load(*p.configPath)
c := config.NewC(l)
err := c.Load(*p.configPath)
if err != nil { if err != nil {
return fmt.Errorf("failed to load config: %s", err) return fmt.Errorf("failed to load config: %s", err)
} }
p.control, err = nebula.Main(c, *p.configTest, Build, l, nil) l := logrus.New()
l.Out = os.Stdout
p.control, err = nebula.Main(config, *p.configTest, Build, l, nil)
if err != nil { if err != nil {
return err return err
} }
@@ -49,14 +47,6 @@ func (p *program) Stop(s service.Service) error {
return nil return nil
} }
func fileExists(filename string) bool {
_, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return true
}
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) { func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
if *configPath == "" { if *configPath == "" {
ex, err := os.Executable() ex, err := os.Executable()
@@ -64,9 +54,6 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
panic(err) panic(err)
} }
*configPath = filepath.Dir(ex) + "/config.yaml" *configPath = filepath.Dir(ex) + "/config.yaml"
if !fileExists(*configPath) {
*configPath = filepath.Dir(ex) + "/config.yml"
}
} }
svcConfig := &service.Config{ svcConfig := &service.Config{
@@ -82,10 +69,6 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
build: build, build: build,
} }
// Here are what the different loggers are doing:
// - `log` is the standard go log utility, meant to be used while the process is still attached to stdout/stderr
// - `logger` is the service log utility that may be attached to a special place depending on OS (Windows will have it attached to the event log)
// - above, in `Run` we create a `logrus.Logger` which is what nebula expects to use
s, err := service.New(prg, svcConfig) s, err := service.New(prg, svcConfig)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@@ -101,7 +84,6 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
for { for {
err := <-errs err := <-errs
if err != nil { if err != nil {
// Route any errors from the system logger to stdout as a best effort to notice issues there
log.Print(err) log.Print(err)
} }
} }
@@ -111,7 +93,6 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
case "run": case "run":
err = s.Run() err = s.Run()
if err != nil { if err != nil {
// Route any errors to the system logger
logger.Error(err) logger.Error(err)
} }
default: default:

@@ -7,8 +7,6 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/util"
) )
// A version string that can be set with // A version string that can be set with
@@ -42,20 +40,19 @@ func main() {
os.Exit(1) os.Exit(1)
} }
l := logrus.New() config := nebula.NewConfig()
l.Out = os.Stdout err := config.Load(*configPath)
c := config.NewC(l)
err := c.Load(*configPath)
if err != nil { if err != nil {
fmt.Printf("failed to load config: %s", err) fmt.Printf("failed to load config: %s", err)
os.Exit(1) os.Exit(1)
} }
ctrl, err := nebula.Main(c, *configTest, Build, l, nil) l := logrus.New()
l.Out = os.Stdout
c, err := nebula.Main(config, *configTest, Build, l, nil)
switch v := err.(type) { switch v := err.(type) {
case util.ContextualError: case nebula.ContextualError:
v.Log(l) v.Log(l)
os.Exit(1) os.Exit(1)
case error: case error:
@@ -64,8 +61,8 @@ func main() {
} }
if !*configTest { if !*configTest {
ctrl.Start() c.Start()
ctrl.ShutdownBlock() c.ShutdownBlock()
} }
os.Exit(0) os.Exit(0)
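
Stripped of the interleaving, the startup sequence shared by the two main.go files above (the variant that uses the config and util packages) is roughly the following. The wrapper name run is illustrative, and the body of the plain `case error:` branch is not visible in the hunks, so it is only marked here as an assumption.

package main

import (
    "fmt"
    "os"

    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula"
    "github.com/slackhq/nebula/config"
    "github.com/slackhq/nebula/util"
)

// run: build a logger, load the config directory, hand both to nebula.Main,
// then either exit (config test) or start and block until shutdown.
func run(configPath string, configTest bool, build string) {
    l := logrus.New()
    l.Out = os.Stdout

    c := config.NewC(l)
    if err := c.Load(configPath); err != nil {
        fmt.Printf("failed to load config: %s", err)
        os.Exit(1)
    }

    ctrl, err := nebula.Main(c, configTest, build, l, nil)
    switch v := err.(type) {
    case util.ContextualError:
        v.Log(l)
        os.Exit(1)
    case error:
        // generic error handling is elided in the hunks above
        os.Exit(1)
    }

    if !configTest {
        ctrl.Start()
        ctrl.ShutdownBlock()
    }
    os.Exit(0)
}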

@@ -1,17 +1,17 @@
package config package nebula
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"sync"
"syscall" "syscall"
"time" "time"
@@ -20,25 +20,22 @@ import (
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
type C struct { type Config struct {
path string path string
files []string files []string
Settings map[interface{}]interface{} Settings map[interface{}]interface{}
oldSettings map[interface{}]interface{} oldSettings map[interface{}]interface{}
callbacks []func(*C) callbacks []func(*Config)
l *logrus.Logger
reloadLock sync.Mutex
} }
func NewC(l *logrus.Logger) *C { func NewConfig() *Config {
return &C{ return &Config{
Settings: make(map[interface{}]interface{}), Settings: make(map[interface{}]interface{}),
l: l,
} }
} }
// Load will find all yaml files within path and load them in lexical order // Load will find all yaml files within path and load them in lexical order
func (c *C) Load(path string) error { func (c *Config) Load(path string) error {
c.path = path c.path = path
c.files = make([]string, 0) c.files = make([]string, 0)
@@ -61,7 +58,7 @@ func (c *C) Load(path string) error {
return nil return nil
} }
func (c *C) LoadString(raw string) error { func (c *Config) LoadString(raw string) error {
if raw == "" { if raw == "" {
return errors.New("Empty configuration") return errors.New("Empty configuration")
} }
@@ -72,21 +69,16 @@ func (c *C) LoadString(raw string) error {
// here should decide if they need to make a change to the current process before making the change. HasChanged can be // here should decide if they need to make a change to the current process before making the change. HasChanged can be
// used to help decide if a change is necessary. // used to help decide if a change is necessary.
// These functions should return quickly or spawn their own go routine if they will take a while // These functions should return quickly or spawn their own go routine if they will take a while
func (c *C) RegisterReloadCallback(f func(*C)) { func (c *Config) RegisterReloadCallback(f func(*Config)) {
c.callbacks = append(c.callbacks, f) c.callbacks = append(c.callbacks, f)
} }
// InitialLoad returns true if this is the first load of the config, and ReloadConfig has not been called yet.
func (c *C) InitialLoad() bool {
return c.oldSettings == nil
}
// HasChanged checks if the underlying structure of the provided key has changed after a config reload. The value of // HasChanged checks if the underlying structure of the provided key has changed after a config reload. The value of
// k in both the old and new settings will be serialized, the result of the string comparison is returned. // k in both the old and new settings will be serialized, the result of the string comparison is returned.
// If k is an empty string the entire config is tested. // If k is an empty string the entire config is tested.
// It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating // It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating
// there is change when there actually wasn't any. // there is change when there actually wasn't any.
func (c *C) HasChanged(k string) bool { func (c *Config) HasChanged(k string) bool {
if c.oldSettings == nil { if c.oldSettings == nil {
return false return false
} }
@@ -107,12 +99,12 @@ func (c *C) HasChanged(k string) bool {
newVals, err := yaml.Marshal(nv) newVals, err := yaml.Marshal(nv)
if err != nil { if err != nil {
c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config") l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config")
} }
oldVals, err := yaml.Marshal(ov) oldVals, err := yaml.Marshal(ov)
if err != nil { if err != nil {
c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config") l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config")
} }
return string(newVals) != string(oldVals) return string(newVals) != string(oldVals)
@@ -120,29 +112,19 @@ func (c *C) HasChanged(k string) bool {
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the // CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
// original path provided to Load. The old settings are shallow copied for change detection after the reload. // original path provided to Load. The old settings are shallow copied for change detection after the reload.
func (c *C) CatchHUP(ctx context.Context) { func (c *Config) CatchHUP() {
ch := make(chan os.Signal, 1) ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGHUP) signal.Notify(ch, syscall.SIGHUP)
go func() { go func() {
for { for range ch {
select { l.Info("Caught HUP, reloading config")
case <-ctx.Done():
signal.Stop(ch)
close(ch)
return
case <-ch:
c.l.Info("Caught HUP, reloading config")
c.ReloadConfig() c.ReloadConfig()
} }
}
}() }()
} }
func (c *C) ReloadConfig() { func (c *Config) ReloadConfig() {
c.reloadLock.Lock()
defer c.reloadLock.Unlock()
c.oldSettings = make(map[interface{}]interface{}) c.oldSettings = make(map[interface{}]interface{})
for k, v := range c.Settings { for k, v := range c.Settings {
c.oldSettings[k] = v c.oldSettings[k] = v
@@ -150,7 +132,7 @@ func (c *C) ReloadConfig() {
err := c.Load(c.path) err := c.Load(c.path)
if err != nil { if err != nil {
c.l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config") l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config")
return return
} }
@@ -159,29 +141,8 @@ func (c *C) ReloadConfig() {
} }
} }
func (c *C) ReloadConfigString(raw string) error {
c.reloadLock.Lock()
defer c.reloadLock.Unlock()
c.oldSettings = make(map[interface{}]interface{})
for k, v := range c.Settings {
c.oldSettings[k] = v
}
err := c.LoadString(raw)
if err != nil {
return err
}
for _, v := range c.callbacks {
v(c)
}
return nil
}
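
ReloadConfigString (present only on the newer side of this diff) lets an embedding application push a replacement config as a string instead of re-reading files from disk. A small sketch, assuming the same config.NewC/LoadString API shown above; the keys used are only illustrative:

package main

import (
    "fmt"

    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/config"
)

func main() {
    c := config.NewC(logrus.New())
    if err := c.LoadString("listen:\n  port: 4242\n"); err != nil {
        panic(err)
    }

    // Later, e.g. when a mobile app hands over new settings:
    if err := c.ReloadConfigString("listen:\n  port: 4243\n"); err != nil {
        panic(err)
    }
    fmt.Println(c.GetInt("listen.port", 0), c.HasChanged("listen")) // 4243 true
}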
// GetString will get the string for k or return the default d if not found or invalid
func (c *C) GetString(k, d string) string { func (c *Config) GetString(k, d string) string {
r := c.Get(k)
if r == nil {
return d
@@ -191,7 +152,7 @@ func (c *C) GetString(k, d string) string {
}
// GetStringSlice will get the slice of strings for k or return the default d if not found or invalid
func (c *C) GetStringSlice(k string, d []string) []string { func (c *Config) GetStringSlice(k string, d []string) []string {
r := c.Get(k)
if r == nil {
return d
@@ -211,7 +172,7 @@ func (c *C) GetStringSlice(k string, d []string) []string {
}
// GetMap will get the map for k or return the default d if not found or invalid
func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} { func (c *Config) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} {
r := c.Get(k)
if r == nil {
return d
@@ -226,7 +187,7 @@ func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]inte
}
// GetInt will get the int for k or return the default d if not found or invalid
func (c *C) GetInt(k string, d int) int { func (c *Config) GetInt(k string, d int) int {
r := c.GetString(k, strconv.Itoa(d))
v, err := strconv.Atoi(r)
if err != nil {
@@ -237,7 +198,7 @@ func (c *C) GetInt(k string, d int) int {
}
// GetBool will get the bool for k or return the default d if not found or invalid
func (c *C) GetBool(k string, d bool) bool { func (c *Config) GetBool(k string, d bool) bool {
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
v, err := strconv.ParseBool(r)
if err != nil {
@@ -254,7 +215,7 @@ func (c *C) GetBool(k string, d bool) bool {
}
// GetDuration will get the duration for k or return the default d if not found or invalid
func (c *C) GetDuration(k string, d time.Duration) time.Duration { func (c *Config) GetDuration(k string, d time.Duration) time.Duration {
r := c.GetString(k, "")
v, err := time.ParseDuration(r)
if err != nil {
@@ -263,15 +224,138 @@ func (c *C) GetDuration(k string, d time.Duration) time.Duration {
return v
}
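
The getters above all fall back to the supplied default when a key is missing or fails to parse, and dotted keys walk nested maps. A short sketch (keys are illustrative, and the settings are populated directly the way the tests below do instead of via YAML):

package main

import (
    "fmt"
    "time"

    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/config"
)

func main() {
    c := config.NewC(logrus.New())
    c.Settings["listen"] = map[interface{}]interface{}{"port": 4242, "host": "0.0.0.0"}

    fmt.Println(c.GetInt("listen.port", 9999))                  // 4242, read from the nested map
    fmt.Println(c.GetString("listen.host", ""))                 // 0.0.0.0
    fmt.Println(c.GetBool("punchy", false))                     // false, key missing -> default
    fmt.Println(c.GetDuration("timers.check", 10*time.Second))  // default, key missing
}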
func (c *C) Get(k string) interface{} { func (c *Config) GetAllowList(k string, allowInterfaces bool) (*AllowList, error) {
r := c.Get(k)
if r == nil {
return nil, nil
}
rawMap, ok := r.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, r)
}
tree := NewCIDRTree()
var nameRules []AllowListNameRule
firstValue := true
allValuesMatch := true
defaultSet := false
var allValues bool
for rawKey, rawValue := range rawMap {
rawCIDR, ok := rawKey.(string)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}
// Special rule for interface names
if rawCIDR == "interfaces" {
if !allowInterfaces {
return nil, fmt.Errorf("config `%s` does not support `interfaces`", k)
}
var err error
nameRules, err = c.getAllowListInterfaces(k, rawValue)
if err != nil {
return nil, err
}
continue
}
value, ok := rawValue.(bool)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
}
_, cidr, err := net.ParseCIDR(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}
// TODO: should we error on duplicate CIDRs in the config?
tree.AddCIDR(cidr, value)
if firstValue {
allValues = value
firstValue = false
} else {
if value != allValues {
allValuesMatch = false
}
}
// Check if this is 0.0.0.0/0
bits, size := cidr.Mask.Size()
if bits == 0 && size == 32 {
defaultSet = true
}
}
if !defaultSet {
if allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
tree.AddCIDR(zeroCIDR, !allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
}
}
return &AllowList{cidrTree: tree, nameRules: nameRules}, nil
}
func (c *Config) getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
var nameRules []AllowListNameRule
rawRules, ok := v.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
}
firstEntry := true
var allValues bool
for rawName, rawAllow := range rawRules {
name, ok := rawName.(string)
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
}
allow, ok := rawAllow.(bool)
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
}
nameRE, err := regexp.Compile("^" + name + "$")
if err != nil {
return nil, fmt.Errorf("config `%s.interfaces` has invalid key: %s: %v", k, name, err)
}
nameRules = append(nameRules, AllowListNameRule{
Name: nameRE,
Allow: allow,
})
if firstEntry {
allValues = allow
firstEntry = false
} else {
if allow != allValues {
return nil, fmt.Errorf("config `%s.interfaces` values must all be the same true/false value", k)
}
}
}
return nameRules, nil
}
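
Putting the two functions above together, here is a settings shape GetAllowList accepts, written against the older branch side of this diff where Config and GetAllowList live in the root nebula package (the key name is illustrative; nebula uses this for lighthouse remote/local allow lists). Because the CIDR rules mix true and false, an explicit 0.0.0.0/0 default is required, and the interface-name form is only honored when allowInterfaces is true:

package main

import (
    "fmt"

    "github.com/slackhq/nebula"
)

func main() {
    c := nebula.NewConfig()
    c.Settings["lighthouse"] = map[interface{}]interface{}{
        "local_allow_list": map[interface{}]interface{}{
            "0.0.0.0/0":     true,  // default rule, required because allow and deny are mixed
            "10.0.0.0/8":    false, // drop everything in 10/8 ...
            "10.42.42.0/24": true,  // ... except this subnet
            "interfaces": map[interface{}]interface{}{
                `docker.*`: false, // all interface rules must share one bool value
            },
        },
    }

    al, err := c.GetAllowList("lighthouse.local_allow_list", true)
    fmt.Println(al != nil, err) // true <nil>
}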
func (c *Config) Get(k string) interface{} {
return c.get(k, c.Settings)
}
func (c *C) IsSet(k string) bool { func (c *Config) IsSet(k string) bool {
return c.get(k, c.Settings) != nil
}
func (c *C) get(k string, v interface{}) interface{} { func (c *Config) get(k string, v interface{}) interface{} {
parts := strings.Split(k, ".")
for _, p := range parts {
m, ok := v.(map[interface{}]interface{})
@@ -290,7 +374,7 @@ func (c *C) get(k string, v interface{}) interface{} {
// direct signifies if this is the config path directly specified by the user,
// versus a file/dir found by recursing into that path
func (c *C) resolve(path string, direct bool) error { func (c *Config) resolve(path string, direct bool) error {
i, err := os.Stat(path)
if err != nil {
return nil
@@ -316,7 +400,7 @@ func (c *C) resolve(path string, direct bool) error {
return nil
}
func (c *C) addFile(path string, direct bool) error { func (c *Config) addFile(path string, direct bool) error {
ext := filepath.Ext(path)
if !direct && ext != ".yaml" && ext != ".yml" {
@@ -332,7 +416,7 @@ func (c *C) addFile(path string, direct bool) error {
return nil
}
func (c *C) parseRaw(b []byte) error { func (c *Config) parseRaw(b []byte) error {
var m map[interface{}]interface{}
err := yaml.Unmarshal(b, &m)
@@ -344,7 +428,7 @@ func (c *C) parseRaw(b []byte) error {
return nil
}
func (c *C) parse() error { func (c *Config) parse() error {
var m map[interface{}]interface{}
for _, path := range c.files {
@@ -387,3 +471,38 @@ func readDirNames(path string) ([]string, error) {
sort.Strings(paths)
return paths, nil
}
func configLogger(c *Config) error {
// set up our logging level
logLevel, err := logrus.ParseLevel(strings.ToLower(c.GetString("logging.level", "info")))
if err != nil {
return fmt.Errorf("%s; possible levels: %s", err, logrus.AllLevels)
}
l.SetLevel(logLevel)
disableTimestamp := c.GetBool("logging.disable_timestamp", false)
timestampFormat := c.GetString("logging.timestamp_format", "")
fullTimestamp := (timestampFormat != "")
if timestampFormat == "" {
timestampFormat = time.RFC3339
}
logFormat := strings.ToLower(c.GetString("logging.format", "text"))
switch logFormat {
case "text":
l.Formatter = &logrus.TextFormatter{
TimestampFormat: timestampFormat,
FullTimestamp: fullTimestamp,
DisableTimestamp: disableTimestamp,
}
case "json":
l.Formatter = &logrus.JSONFormatter{
TimestampFormat: timestampFormat,
DisableTimestamp: disableTimestamp,
}
default:
return fmt.Errorf("unknown log format `%s`. possible formats: %s", logFormat, []string{"text", "json"})
}
return nil
}
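
configLogger drives the package-level logger from the logging.* keys. For reference, a standalone sketch of the same level/formatter choices using plain logrus (this is not nebula code, just the equivalent of logging: level: debug, format: json):

package main

import (
    "time"

    "github.com/sirupsen/logrus"
)

func main() {
    l := logrus.New()

    lvl, err := logrus.ParseLevel("debug") // logging.level
    if err != nil {
        l.Fatal(err)
    }
    l.SetLevel(lvl)

    // logging.format: json, with the default RFC3339 timestamp format
    l.Formatter = &logrus.JSONFormatter{TimestampFormat: time.RFC3339}

    l.WithField("example", true).Info("logger configured")
}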


@@ -1,4 +1,4 @@
package config package nebula
import (
"io/ioutil"
@@ -7,23 +7,18 @@ import (
"testing"
"time"
"github.com/imdario/mergo"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
func TestConfig_Load(t *testing.T) {
l := test.NewLogger()
dir, err := ioutil.TempDir("", "config-test")
// invalid yaml
c := NewC(l) c := NewConfig()
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
// simple multi config merge
c = NewC(l) c = NewConfig()
os.RemoveAll(dir)
os.Mkdir(dir, 0755)
@@ -45,9 +40,8 @@ func TestConfig_Load(t *testing.T) {
}
func TestConfig_Get(t *testing.T) {
l := test.NewLogger()
// test simple type
c := NewC(l) c := NewConfig()
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"}
assert.Equal(t, "hi", c.Get("firewall.outbound"))
@@ -61,15 +55,13 @@ func TestConfig_Get(t *testing.T) {
}
func TestConfig_GetStringSlice(t *testing.T) {
l := test.NewLogger() c := NewConfig()
c := NewC(l)
c.Settings["slice"] = []interface{}{"one", "two"}
assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{}))
}
func TestConfig_GetBool(t *testing.T) {
l := test.NewLogger() c := NewConfig()
c := NewC(l)
c.Settings["bool"] = true
assert.Equal(t, true, c.GetBool("bool", false))
@@ -95,22 +87,91 @@ func TestConfig_GetBool(t *testing.T) {
assert.Equal(t, false, c.GetBool("bool", true))
}
func TestConfig_GetAllowList(t *testing.T) {
c := NewConfig()
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0": true,
}
r, err := c.GetAllowList("allowlist", false)
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
assert.Nil(t, r)
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0/16": "abc",
}
r, err = c.GetAllowList("allowlist", false)
assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0/16": true,
"10.0.0.0/8": false,
}
r, err = c.GetAllowList("allowlist", false)
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
}
r, err = c.GetAllowList("allowlist", false)
if assert.NoError(t, err) {
assert.NotNil(t, r)
}
// Test interface names
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
},
}
r, err = c.GetAllowList("allowlist", false)
assert.EqualError(t, err, "config `allowlist` does not support `interfaces`")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: "foo",
},
}
r, err = c.GetAllowList("allowlist", true)
assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
`eth.*`: true,
},
}
r, err = c.GetAllowList("allowlist", true)
assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
},
}
r, err = c.GetAllowList("allowlist", true)
if assert.NoError(t, err) {
assert.NotNil(t, r)
}
}
func TestConfig_HasChanged(t *testing.T) {
l := test.NewLogger()
// No reload has occurred, return false
c := NewC(l) c := NewConfig()
c.Settings["test"] = "hi"
assert.False(t, c.HasChanged(""))
// Test key change
c = NewC(l) c = NewConfig()
c.Settings["test"] = "hi"
c.oldSettings = map[interface{}]interface{}{"test": "no"}
assert.True(t, c.HasChanged("test"))
assert.True(t, c.HasChanged(""))
// No key change
c = NewC(l) c = NewConfig()
c.Settings["test"] = "hi"
c.oldSettings = map[interface{}]interface{}{"test": "hi"}
assert.False(t, c.HasChanged("test"))
@@ -118,13 +179,12 @@ func TestConfig_HasChanged(t *testing.T) {
}
func TestConfig_ReloadConfig(t *testing.T) {
l := test.NewLogger()
done := make(chan bool, 1)
dir, err := ioutil.TempDir("", "config-test")
assert.Nil(t, err)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
c := NewC(l) c := NewConfig()
assert.Nil(t, c.Load(dir))
assert.False(t, c.HasChanged("outer.inner"))
@@ -133,7 +193,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
c.RegisterReloadCallback(func(c *C) { c.RegisterReloadCallback(func(c *Config) {
done <- true
})
@@ -150,77 +210,3 @@ func TestConfig_ReloadConfig(t *testing.T) {
}
}
// Ensure mergo merges are done the way we expect.
// This is needed to test for potential regressions, like:
// - https://github.com/imdario/mergo/issues/187
func TestConfig_MergoMerge(t *testing.T) {
configs := [][]byte{
[]byte(`
listen:
port: 1234
`),
[]byte(`
firewall:
inbound:
- port: 443
proto: tcp
groups:
- server
- port: 443
proto: tcp
groups:
- webapp
`),
[]byte(`
listen:
host: 0.0.0.0
port: 4242
firewall:
outbound:
- port: any
proto: any
host: any
inbound:
- port: any
proto: icmp
host: any
`),
}
var m map[any]any
// merge the same way config.parse() merges
for _, b := range configs {
var nm map[any]any
err := yaml.Unmarshal(b, &nm)
require.NoError(t, err)
// We need to use WithAppendSlice so that firewall rules in separate
// files are appended together
err = mergo.Merge(&nm, m, mergo.WithAppendSlice)
m = nm
require.NoError(t, err)
}
t.Logf("Merged Config: %#v", m)
mYaml, err := yaml.Marshal(m)
require.NoError(t, err)
t.Logf("Merged Config as YAML:\n%s", mYaml)
// If a bug is present, some items might be replaced instead of merged like we expect
expected := map[any]any{
"firewall": map[any]any{
"inbound": []any{
map[any]any{"host": "any", "port": "any", "proto": "icmp"},
map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
"outbound": []any{
map[any]any{"host": "any", "port": "any", "proto": "any"}}},
"listen": map[any]any{
"host": "0.0.0.0",
"port": 4242,
},
}
assert.Equal(t, expected, m)
}


@@ -1,490 +1,254 @@
package nebula
import (
"bytes"
"context"
"sync"
"time"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
)
type trafficDecision int // TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
// and something like every 10 packets we could lock, send 10, then unlock for a moment
const (
doNothing trafficDecision = 0
deleteTunnel trafficDecision = 1 // delete the hostinfo on our side, do not notify the remote
closeTunnel trafficDecision = 2 // delete the hostinfo and notify the remote
swapPrimary trafficDecision = 3
migrateRelays trafficDecision = 4
tryRehandshake trafficDecision = 5
)
type connectionManager struct { type connectionManager struct {
hostMap *HostMap
in map[uint32]struct{} in map[uint32]struct{}
inLock *sync.RWMutex inLock *sync.RWMutex
inCount int
out map[uint32]struct{} out map[uint32]struct{}
outLock *sync.RWMutex outLock *sync.RWMutex
outCount int
// relayUsed holds which relay localIndexs are in use TrafficTimer *SystemTimerWheel
relayUsed map[uint32]struct{}
relayUsedLock *sync.RWMutex
hostMap *HostMap
trafficTimer *LockingTimerWheel[uint32]
intf *Interface intf *Interface
pendingDeletion map[uint32]struct{}
punchy *Punchy
checkInterval time.Duration
pendingDeletionInterval time.Duration
metricsTxPunchy metrics.Counter
l *logrus.Logger pendingDeletion map[uint32]int
pendingDeletionLock *sync.RWMutex
pendingDeletionTimer *SystemTimerWheel
checkInterval int
pendingDeletionInterval int
// I wanted to call one matLock
} }
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager { func newConnectionManager(intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
var max time.Duration
if checkInterval < pendingDeletionInterval {
max = pendingDeletionInterval
} else {
max = checkInterval
}
nc := &connectionManager{ nc := &connectionManager{
hostMap: intf.hostMap, hostMap: intf.hostMap,
in: make(map[uint32]struct{}), in: make(map[uint32]struct{}),
inLock: &sync.RWMutex{}, inLock: &sync.RWMutex{},
inCount: 0,
out: make(map[uint32]struct{}), out: make(map[uint32]struct{}),
outLock: &sync.RWMutex{}, outLock: &sync.RWMutex{},
relayUsed: make(map[uint32]struct{}), outCount: 0,
relayUsedLock: &sync.RWMutex{}, TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
intf: intf, intf: intf,
pendingDeletion: make(map[uint32]struct{}), pendingDeletion: make(map[uint32]int),
pendingDeletionLock: &sync.RWMutex{},
pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
checkInterval: checkInterval, checkInterval: checkInterval,
pendingDeletionInterval: pendingDeletionInterval, pendingDeletionInterval: pendingDeletionInterval,
punchy: punchy,
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
l: l,
} }
nc.Start()
nc.Start(ctx)
return nc return nc
} }
func (n *connectionManager) In(localIndex uint32) { func (n *connectionManager) In(ip uint32) {
n.inLock.RLock() n.inLock.RLock()
// If this already exists, return // If this already exists, return
if _, ok := n.in[localIndex]; ok { if _, ok := n.in[ip]; ok {
n.inLock.RUnlock() n.inLock.RUnlock()
return return
} }
n.inLock.RUnlock() n.inLock.RUnlock()
n.inLock.Lock() n.inLock.Lock()
n.in[localIndex] = struct{}{} n.in[ip] = struct{}{}
n.inLock.Unlock() n.inLock.Unlock()
} }
func (n *connectionManager) Out(localIndex uint32) { func (n *connectionManager) Out(ip uint32) {
n.outLock.RLock() n.outLock.RLock()
// If this already exists, return // If this already exists, return
if _, ok := n.out[localIndex]; ok { if _, ok := n.out[ip]; ok {
n.outLock.RUnlock() n.outLock.RUnlock()
return return
} }
n.outLock.RUnlock() n.outLock.RUnlock()
n.outLock.Lock() n.outLock.Lock()
n.out[localIndex] = struct{}{} // double check since we dropped the lock temporarily
if _, ok := n.out[ip]; ok {
n.outLock.Unlock()
return
}
n.out[ip] = struct{}{}
n.AddTrafficWatch(ip, n.checkInterval)
n.outLock.Unlock() n.outLock.Unlock()
} }
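
In and Out only record that traffic was seen for a tunnel (keyed by local index on the left side of this diff, by vpn ip on the older right side); a periodic check later reads and clears those marks. A standalone sketch of that pattern, not the nebula types:

package main

import (
    "fmt"
    "sync"
)

type trafficMarks struct {
    mu  sync.RWMutex
    in  map[uint32]struct{}
    out map[uint32]struct{}
}

func newTrafficMarks() *trafficMarks {
    return &trafficMarks{in: map[uint32]struct{}{}, out: map[uint32]struct{}{}}
}

// In is called on every received packet, so the common case takes only a read lock.
func (t *trafficMarks) In(idx uint32) {
    t.mu.RLock()
    _, ok := t.in[idx]
    t.mu.RUnlock()
    if ok {
        return
    }
    t.mu.Lock()
    t.in[idx] = struct{}{}
    t.mu.Unlock()
}

func (t *trafficMarks) Out(idx uint32) {
    t.mu.Lock()
    t.out[idx] = struct{}{}
    t.mu.Unlock()
}

// getAndReset reports whether idx saw traffic since the last check and clears the marks.
func (t *trafficMarks) getAndReset(idx uint32) (in, out bool) {
    t.mu.Lock()
    defer t.mu.Unlock()
    _, in = t.in[idx]
    _, out = t.out[idx]
    delete(t.in, idx)
    delete(t.out, idx)
    return in, out
}

func main() {
    t := newTrafficMarks()
    t.In(42)
    t.Out(42)
    fmt.Println(t.getAndReset(42)) // true true
    fmt.Println(t.getAndReset(42)) // false false
}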
func (n *connectionManager) RelayUsed(localIndex uint32) { func (n *connectionManager) CheckIn(vpnIP uint32) bool {
n.relayUsedLock.RLock() n.inLock.RLock()
// If this already exists, return if _, ok := n.in[vpnIP]; ok {
if _, ok := n.relayUsed[localIndex]; ok { n.inLock.RUnlock()
n.relayUsedLock.RUnlock() return true
return
} }
n.relayUsedLock.RUnlock() n.inLock.RUnlock()
n.relayUsedLock.Lock() return false
n.relayUsed[localIndex] = struct{}{}
n.relayUsedLock.Unlock()
} }
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and func (n *connectionManager) ClearIP(ip uint32) {
// resets the state for this local index
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
n.inLock.Lock() n.inLock.Lock()
n.outLock.Lock() n.outLock.Lock()
_, in := n.in[localIndex] delete(n.in, ip)
_, out := n.out[localIndex] delete(n.out, ip)
delete(n.in, localIndex)
delete(n.out, localIndex)
n.inLock.Unlock() n.inLock.Unlock()
n.outLock.Unlock() n.outLock.Unlock()
return in, out
} }
func (n *connectionManager) AddTrafficWatch(localIndex uint32) { func (n *connectionManager) ClearPendingDeletion(ip uint32) {
// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index n.pendingDeletionLock.Lock()
n.outLock.Lock() delete(n.pendingDeletion, ip)
if _, ok := n.out[localIndex]; ok { n.pendingDeletionLock.Unlock()
n.outLock.Unlock() }
return
func (n *connectionManager) AddPendingDeletion(ip uint32) {
n.pendingDeletionLock.Lock()
if _, ok := n.pendingDeletion[ip]; ok {
n.pendingDeletion[ip] += 1
} else {
n.pendingDeletion[ip] = 0
} }
n.out[localIndex] = struct{}{} n.pendingDeletionTimer.Add(ip, time.Second*time.Duration(n.pendingDeletionInterval))
n.trafficTimer.Add(localIndex, n.checkInterval) n.pendingDeletionLock.Unlock()
n.outLock.Unlock()
} }
func (n *connectionManager) Start(ctx context.Context) { func (n *connectionManager) checkPendingDeletion(ip uint32) bool {
go n.Run(ctx) n.pendingDeletionLock.RLock()
if _, ok := n.pendingDeletion[ip]; ok {
n.pendingDeletionLock.RUnlock()
return true
}
n.pendingDeletionLock.RUnlock()
return false
} }
func (n *connectionManager) Run(ctx context.Context) { func (n *connectionManager) AddTrafficWatch(vpnIP uint32, seconds int) {
//TODO: this tick should be based on the min wheel tick? Check firewall n.TrafficTimer.Add(vpnIP, time.Second*time.Duration(seconds))
clockSource := time.NewTicker(500 * time.Millisecond) }
defer clockSource.Stop()
p := []byte("") func (n *connectionManager) Start() {
nb := make([]byte, 12, 12) go n.Run()
out := make([]byte, mtu) }
func (n *connectionManager) Run() {
clockSource := time.Tick(500 * time.Millisecond)
for now := range clockSource {
n.HandleMonitorTick(now)
n.HandleDeletionTick(now)
}
}
func (n *connectionManager) HandleMonitorTick(now time.Time) {
n.TrafficTimer.advance(now)
for { for {
select { ep := n.TrafficTimer.Purge()
case <-ctx.Done(): if ep == nil {
return
case now := <-clockSource.C:
n.trafficTimer.Advance(now)
for {
localIndex, has := n.trafficTimer.Purge()
if !has {
break break
} }
n.doTrafficCheck(localIndex, p, nb, out, now) vpnIP := ep.(uint32)
}
}
}
}
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) { // Check for traffic coming back in from this host.
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now) traf := n.CheckIn(vpnIP)
switch decision { // If we saw incoming packets from this ip, just return
case deleteTunnel: if traf {
if n.hostMap.DeleteHostInfo(hostinfo) { if l.Level >= logrus.DebugLevel {
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap l.WithField("vpnIp", IntIp(vpnIP)).
n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
}
case closeTunnel:
n.intf.sendCloseTunnel(hostinfo)
n.intf.closeTunnel(hostinfo)
case swapPrimary:
n.swapPrimary(hostinfo, primary)
case migrateRelays:
n.migrateRelayUsed(hostinfo, primary)
case tryRehandshake:
n.tryRehandshake(hostinfo)
}
n.resetRelayTrafficCheck(hostinfo)
}
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
if hostinfo != nil {
n.relayUsedLock.Lock()
defer n.relayUsedLock.Unlock()
// No need to migrate any relays, delete usage info now.
for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
delete(n.relayUsed, idx)
}
}
}
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
relayFor := oldhostinfo.relayState.CopyAllRelayFor()
for _, r := range relayFor {
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
var index uint32
var relayFrom iputil.VpnIp
var relayTo iputil.VpnIp
switch {
case ok && existing.State == Established:
// This relay already exists in newhostinfo, then do nothing.
continue
case ok && existing.State == Requested:
// The relay exists in a Requested state; re-send the request
index = existing.LocalIndex
switch r.Type {
case TerminalType:
relayFrom = newhostinfo.vpnIp
relayTo = existing.PeerIp
case ForwardingType:
relayFrom = existing.PeerIp
relayTo = newhostinfo.vpnIp
default:
// should never happen
}
case !ok:
n.relayUsedLock.RLock()
if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
// The relay hasn't been used; don't migrate it.
n.relayUsedLock.RUnlock()
continue
}
n.relayUsedLock.RUnlock()
// The relay doesn't exist at all; create some relay state and send the request.
var err error
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
if err != nil {
n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
continue
}
switch r.Type {
case TerminalType:
relayFrom = newhostinfo.vpnIp
relayTo = r.PeerIp
case ForwardingType:
relayFrom = r.PeerIp
relayTo = newhostinfo.vpnIp
default:
// should never happen
}
}
// Send a CreateRelayRequest to the peer.
req := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: index,
RelayFromIp: uint32(relayFrom),
RelayToIp: uint32(relayTo),
}
msg, err := req.Marshal()
if err != nil {
n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
} else {
n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
n.l.WithFields(logrus.Fields{
"relayFrom": iputil.VpnIp(req.RelayFromIp),
"relayTo": iputil.VpnIp(req.RelayToIp),
"initiatorRelayIndex": req.InitiatorRelayIndex,
"responderRelayIndex": req.ResponderRelayIndex,
"vpnIp": newhostinfo.vpnIp}).
Info("send CreateRelayRequest")
}
}
}
func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
n.hostMap.RLock()
defer n.hostMap.RUnlock()
hostinfo := n.hostMap.Indexes[localIndex]
if hostinfo == nil {
n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
delete(n.pendingDeletion, localIndex)
return doNothing, nil, nil
}
if n.isInvalidCertificate(now, hostinfo) {
delete(n.pendingDeletion, hostinfo.localIndexId)
return closeTunnel, hostinfo, nil
}
primary := n.hostMap.Hosts[hostinfo.vpnIp]
mainHostInfo := true
if primary != nil && primary != hostinfo {
mainHostInfo = false
}
// Check for traffic on this hostinfo
inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
// A hostinfo is determined alive if there is incoming traffic
if inTraffic {
decision := doNothing
if n.l.Level >= logrus.DebugLevel {
hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}). WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
Debug("Tunnel status") Debug("Tunnel status")
} }
delete(n.pendingDeletion, hostinfo.localIndexId) n.ClearIP(vpnIP)
n.ClearPendingDeletion(vpnIP)
if mainHostInfo { continue
decision = tryRehandshake
} else {
if n.shouldSwapPrimary(hostinfo, primary) {
decision = swapPrimary
} else {
// migrate the relays to the primary, if in use.
decision = migrateRelays
}
} }
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval) // If we didn't we may need to probe or destroy the conn
hostinfo, err := n.hostMap.QueryVpnIP(vpnIP)
if !outTraffic { if err != nil {
// Send a punch packet to keep the NAT state alive l.Debugf("Not found in hostmap: %s", IntIp(vpnIP))
n.sendPunch(hostinfo) n.ClearIP(vpnIP)
n.ClearPendingDeletion(vpnIP)
continue
} }
return decision, hostinfo, primary hostinfo.logger().
}
if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
// We have already sent a test packet and nothing was returned, this hostinfo is dead
hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
Info("Tunnel status")
delete(n.pendingDeletion, hostinfo.localIndexId)
return deleteTunnel, hostinfo, nil
}
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
if !outTraffic {
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
// Just maintain NAT state if configured to do so.
n.sendPunch(hostinfo)
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
return doNothing, nil, nil
}
if n.punchy.GetTargetEverything() {
// This is similar to the old punchy behavior with a slight optimization.
// We aren't receiving traffic but we are sending it, punch on all known
// ips in case we need to re-prime NAT state
n.sendPunch(hostinfo)
}
if n.l.Level >= logrus.DebugLevel {
hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "testing", "method": "active"}). WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
Debug("Tunnel status") Debug("Tunnel status")
}
if hostinfo != nil && hostinfo.ConnectionState != nil {
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out) n.intf.SendMessageToVpnIp(test, testRequest, vpnIP, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
} else { } else {
if n.l.Level >= logrus.DebugLevel { hostinfo.logger().Debugf("Hostinfo sadness: %s", IntIp(vpnIP))
hostinfo.logger(n.l).Debugf("Hostinfo sadness")
} }
n.AddPendingDeletion(vpnIP)
} }
n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
return doNothing, nil, nil
} }
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool { func (n *connectionManager) HandleDeletionTick(now time.Time) {
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine. n.pendingDeletionTimer.advance(now)
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary. for {
// Let's sort this out. ep := n.pendingDeletionTimer.Purge()
if ep == nil {
if current.vpnIp < n.intf.myVpnIp { break
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
// The remotes vpn ip is lower than mine. I will not flip.
return false
} }
certState := n.intf.certState.Load() vpnIP := ep.(uint32)
return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature)
}
func (n *connectionManager) swapPrimary(current, primary *HostInfo) { // If we saw incoming packets from this ip, just return
n.hostMap.Lock() traf := n.CheckIn(vpnIP)
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake. if traf {
if n.hostMap.Hosts[current.vpnIp] == primary { l.WithField("vpnIp", IntIp(vpnIP)).
n.hostMap.unlockedMakePrimary(current) WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
} Debug("Tunnel status")
n.hostMap.Unlock() n.ClearIP(vpnIP)
} n.ClearPendingDeletion(vpnIP)
continue
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
// check and return true.
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
remoteCert := hostinfo.GetCert()
if remoteCert == nil {
return false
} }
valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool) hostinfo, err := n.hostMap.QueryVpnIP(vpnIP)
if valid { if err != nil {
return false n.ClearIP(vpnIP)
n.ClearPendingDeletion(vpnIP)
l.Debugf("Not found in hostmap: %s", IntIp(vpnIP))
continue
} }
if !n.intf.disconnectInvalid && err != cert.ErrBlockListed { // If it comes around on deletion wheel and hasn't resolved itself, delete
// Block listed certificates should always be disconnected if n.checkPendingDeletion(vpnIP) {
return false cn := ""
if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
cn = hostinfo.ConnectionState.peerCert.Details.Name
} }
hostinfo.logger().
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
WithField("certName", cn).
Info("Tunnel status")
fingerprint, _ := remoteCert.Sha256Sum() n.ClearIP(vpnIP)
hostinfo.logger(n.l).WithError(err). n.ClearPendingDeletion(vpnIP)
WithField("fingerprint", fingerprint). // TODO: This is only here to let tests work. Should do proper mocking
Info("Remote certificate is no longer valid, tearing down the tunnel") if n.intf.lightHouse != nil {
n.intf.lightHouse.DeleteVpnIP(vpnIP)
return true
}
func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
if !n.punchy.GetPunch() {
// Punching is disabled
return
} }
n.hostMap.DeleteVpnIP(vpnIP)
if n.punchy.GetTargetEverything() { n.hostMap.DeleteIndex(hostinfo.localIndexId)
hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) { } else {
n.metricsTxPunchy.Inc(1) n.ClearIP(vpnIP)
n.intf.outside.WriteTo([]byte{1}, addr) n.ClearPendingDeletion(vpnIP)
})
} else if hostinfo.remote != nil {
n.metricsTxPunchy.Inc(1)
n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
}
}
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
certState := n.intf.certState.Load()
if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
return
}
n.l.WithField("vpnIp", hostinfo.vpnIp).
WithField("reason", "local certificate is not current").
Info("Re-handshaking with remote")
//TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out
newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
if !newHostinfo.HandshakeReady {
ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
}
//If this is a static host, we don't need to wait for the HostQueryReply
//We can trigger the handshake right now
if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
select {
case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
default:
} }
} }
} }
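
Both versions of the connection manager above perform the same two-phase liveness check: seen traffic means the tunnel is alive, silence first triggers a test packet (header.Test / testRequest), and continued silence while pending deletion evicts the tunnel. A compact standalone model of that flow, not nebula's implementation:

package main

import "fmt"

type tunnelState struct {
    sawTraffic      bool
    pendingDeletion bool
}

// tick mirrors one monitor/deletion pass: traffic clears everything, silence
// first arms a probe, and silence while pending deletion tears the tunnel down.
func tick(s *tunnelState) string {
    if s.sawTraffic {
        s.sawTraffic = false
        s.pendingDeletion = false
        return "alive"
    }
    if !s.pendingDeletion {
        s.pendingDeletion = true
        return "probe" // send a test packet and wait one more interval
    }
    return "evict"
}

func main() {
    s := &tunnelState{sawTraffic: true}
    fmt.Println(tick(s)) // alive
    fmt.Println(tick(s)) // probe
    s.sawTraffic = true  // the test packet was answered
    fmt.Println(tick(s)) // alive
    fmt.Println(tick(s)) // probe
    fmt.Println(tick(s)) // evict
}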


@@ -1,48 +1,26 @@
package nebula package nebula
import ( import (
"context"
"crypto/ed25519"
"crypto/rand"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
var vpnIp iputil.VpnIp var vpnIP uint32
func newTestLighthouse() *LightHouse {
lh := &LightHouse{
l: test.NewLogger(),
addrMap: map[iputil.VpnIp]*RemoteList{},
}
lighthouses := map[iputil.VpnIp]struct{}{}
staticList := map[iputil.VpnIp]struct{}{}
lh.lighthouses.Store(&lighthouses)
lh.staticList.Store(&staticList)
return lh
}
func Test_NewConnectionManagerTest(t *testing.T) { func Test_NewConnectionManagerTest(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2")) vpnIP = ip2int(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
// Very incomplete mock objects // Very incomplete mock objects
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) hostMap := NewHostMap("test", vpncidr, preferredRanges)
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, rawCertificate: []byte{},
privateKey: []byte{}, privateKey: []byte{},
@@ -50,78 +28,62 @@ func Test_NewConnectionManagerTest(t *testing.T) {
rawCertificateNoKey: []byte{}, rawCertificateNoKey: []byte{},
} }
lh := newTestLighthouse() lh := NewLightHouse(false, 0, []uint32{}, 1000, 0, &udpConn{}, false, 1, false)
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &test.NoopTun{}, inside: &Tun{},
outside: &udp.Conn{}, outside: &udpConn{},
certState: cs,
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), handshakeManager: NewHandshakeManager(vpncidr, preferredRanges, hostMap, lh, &udpConn{}, defaultHandshakeConfig),
l: l,
} }
ifce.certState.Store(cs) now := time.Now()
// Create manager // Create manager
ctx, cancel := context.WithCancel(context.Background()) nc := newConnectionManager(ifce, 5, 10)
defer cancel() nc.HandleMonitorTick(now)
punchy := NewPunchyFromConfig(l, config.NewC(l))
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
// Add an ip we have established a connection w/ to hostmap // Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{ hostinfo := nc.hostMap.AddVpnIP(vpnIP)
vpnIp: vpnIp,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{ hostinfo.ConnectionState = &ConnectionState{
certState: cs, certState: cs,
H: &noise.HandshakeState{}, H: &noise.HandshakeState{},
messageCounter: new(uint64),
} }
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// We saw traffic out to vpnIp // We saw traffic out to vpnIP
nc.Out(hostinfo.localIndexId) nc.Out(vpnIP)
nc.In(hostinfo.localIndexId) assert.NotContains(t, nc.pendingDeletion, vpnIP)
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Hosts, vpnIP)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) // Move ahead 5s. Nothing should happen
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) next_tick := now.Add(5 * time.Second)
assert.Contains(t, nc.out, hostinfo.localIndexId) nc.HandleMonitorTick(next_tick)
nc.HandleDeletionTick(next_tick)
// Move ahead 6s. We haven't heard back
next_tick = now.Add(6 * time.Second)
nc.HandleMonitorTick(next_tick)
nc.HandleDeletionTick(next_tick)
// This host should now be up for deletion
assert.Contains(t, nc.pendingDeletion, vpnIP)
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
// Move ahead some more
next_tick = now.Add(45 * time.Second)
nc.HandleMonitorTick(next_tick)
nc.HandleDeletionTick(next_tick)
// The host should be evicted
assert.NotContains(t, nc.pendingDeletion, vpnIP)
assert.NotContains(t, nc.hostMap.Hosts, vpnIP)
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.NotContains(t, nc.in, hostinfo.localIndexId)
// Do another traffic check tick, this host should be pending deletion now
nc.Out(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
// Do a final traffic check tick, the host should now be removed
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
} }
func Test_NewConnectionManagerTest2(t *testing.T) { func Test_NewConnectionManagerTest2(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
// Very incomplete mock objects // Very incomplete mock objects
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) hostMap := NewHostMap("test", vpncidr, preferredRanges)
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, rawCertificate: []byte{},
privateKey: []byte{}, privateKey: []byte{},
@@ -129,163 +91,52 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
rawCertificateNoKey: []byte{}, rawCertificateNoKey: []byte{},
} }
lh := newTestLighthouse() lh := NewLightHouse(false, 0, []uint32{}, 1000, 0, &udpConn{}, false, 1, false)
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &test.NoopTun{}, inside: &Tun{},
outside: &udp.Conn{}, outside: &udpConn{},
certState: cs,
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), handshakeManager: NewHandshakeManager(vpncidr, preferredRanges, hostMap, lh, &udpConn{}, defaultHandshakeConfig),
l: l,
} }
ifce.certState.Store(cs)
// Create manager
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
punchy := NewPunchyFromConfig(l, config.NewC(l))
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
// Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{
vpnIp: vpnIp,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
certState: cs,
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// We saw traffic out to vpnIp
nc.Out(hostinfo.localIndexId)
nc.In(hostinfo.localIndexId)
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.NotContains(t, nc.in, hostinfo.localIndexId)
// Do another traffic check tick, this host should be pending deletion now
nc.Out(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
// We saw traffic, should no longer be pending deletion
nc.In(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}
// Check if we can disconnect the peer.
// Validate if the peer's certificate is invalid (expired, etc.)
// Disconnect only if disconnectInvalid: true is set.
func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
now := time.Now() now := time.Now()
l := test.NewLogger()
ipNet := net.IPNet{
IP: net.IPv4(172, 1, 1, 2),
Mask: net.IPMask{255, 255, 255, 0},
}
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange}
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
// Generate keys for CA and peer's cert.
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
caCert := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "ca",
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
IsCA: true,
PublicKey: pubCA,
},
}
caCert.Sign(cert.Curve_CURVE25519, privCA)
ncp := &cert.NebulaCAPool{
CAs: cert.NewCAPool().CAs,
}
ncp.CAs["ca"] = &caCert
pubCrt, _, _ := ed25519.GenerateKey(rand.Reader)
peerCert := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "host",
Ips: []*net.IPNet{&ipNet},
Subnets: []*net.IPNet{},
NotBefore: now,
NotAfter: now.Add(60 * time.Second),
PublicKey: pubCrt,
IsCA: false,
Issuer: "ca",
},
}
peerCert.Sign(cert.Curve_CURVE25519, privCA)
cs := &CertState{
rawCertificate: []byte{},
privateKey: []byte{},
certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{},
}
lh := newTestLighthouse()
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.Conn{},
firewall: &Firewall{},
lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
l: l,
disconnectInvalid: true,
caPool: ncp,
}
ifce.certState.Store(cs)
// Create manager // Create manager
ctx, cancel := context.WithCancel(context.Background()) nc := newConnectionManager(ifce, 5, 10)
defer cancel() nc.HandleMonitorTick(now)
punchy := NewPunchyFromConfig(l, config.NewC(l)) // Add an ip we have established a connection w/ to hostmap
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy) hostinfo := nc.hostMap.AddVpnIP(vpnIP)
ifce.connectionManager = nc
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
hostinfo.ConnectionState = &ConnectionState{ hostinfo.ConnectionState = &ConnectionState{
certState: cs, certState: cs,
peerCert: &peerCert,
H: &noise.HandshakeState{}, H: &noise.HandshakeState{},
messageCounter: new(uint64),
} }
// Move ahead 45s. // We saw traffic out to vpnIP
// Check if to disconnect with invalid certificate. nc.Out(vpnIP)
// Should be alive. assert.NotContains(t, nc.pendingDeletion, vpnIP)
nextTick := now.Add(45 * time.Second) assert.Contains(t, nc.hostMap.Hosts, vpnIP)
invalid := nc.isInvalidCertificate(nextTick, hostinfo) // Move ahead 5s. Nothing should happen
assert.False(t, invalid) next_tick := now.Add(5 * time.Second)
nc.HandleMonitorTick(next_tick)
nc.HandleDeletionTick(next_tick)
// Move ahead 6s. We haven't heard back
next_tick = now.Add(6 * time.Second)
nc.HandleMonitorTick(next_tick)
nc.HandleDeletionTick(next_tick)
// This host should now be up for deletion
assert.Contains(t, nc.pendingDeletion, vpnIP)
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
// We heard back this time
nc.In(vpnIP)
// Move ahead some more
next_tick = now.Add(45 * time.Second)
nc.HandleMonitorTick(next_tick)
nc.HandleDeletionTick(next_tick)
// The host should be evicted
assert.NotContains(t, nc.pendingDeletion, vpnIP)
assert.Contains(t, nc.hostMap.Hosts, vpnIP)
// Move ahead 61s.
// Check if to disconnect with invalid certificate.
// Should be disconnected.
nextTick = now.Add(61 * time.Second)
invalid = nc.isInvalidCertificate(nextTick, hostinfo)
assert.True(t, invalid)
} }


@@ -4,12 +4,9 @@ import (
"crypto/rand" "crypto/rand"
"encoding/json" "encoding/json"
"sync" "sync"
"sync/atomic"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/noiseutil"
) )
const ReplayWindow = 1024 const ReplayWindow = 1024
@@ -21,36 +18,25 @@ type ConnectionState struct {
certState *CertState certState *CertState
peerCert *cert.NebulaCertificate peerCert *cert.NebulaCertificate
initiator bool initiator bool
messageCounter atomic.Uint64 messageCounter *uint64
window *Bits window *Bits
queueLock sync.Mutex queueLock sync.Mutex
writeLock sync.Mutex writeLock sync.Mutex
ready bool ready bool
} }
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState { func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
var dhFunc noise.DHFunc cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
curCertState := f.certState.Load()
switch curCertState.certificate.Details.Curve {
case cert.Curve_CURVE25519:
dhFunc = noise.DH25519
case cert.Curve_P256:
dhFunc = noiseutil.DHP256
default:
l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve)
return nil
}
cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
if f.cipher == "chachapoly" { if f.cipher == "chachapoly" {
cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256) cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
} }
curCertState := f.certState
static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey} static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
b := NewBits(ReplayWindow) b := NewBits(ReplayWindow)
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss // Clear out bit 0, we never transmit it and we don't want it showing as packet loss
b.Update(l, 0) b.Update(0)
hs, err := noise.NewHandshakeState(noise.Config{ hs, err := noise.NewHandshakeState(noise.Config{
CipherSuite: cs, CipherSuite: cs,
@@ -73,6 +59,7 @@ func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern
window: b, window: b,
ready: false, ready: false,
certState: curCertState, certState: curCertState,
messageCounter: new(uint64),
} }
return ci return ci
@@ -82,7 +69,7 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
return json.Marshal(m{ return json.Marshal(m{
"certificate": cs.peerCert, "certificate": cs.peerCert,
"initiator": cs.initiator, "initiator": cs.initiator,
"message_counter": cs.messageCounter.Load(), "message_counter": cs.messageCounter,
"ready": cs.ready, "ready": cs.ready,
}) })
} }
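
A side note on the counter type change visible above: the newer code stores messageCounter as an atomic.Uint64 (Go 1.19+) instead of a *uint64 manipulated with the sync/atomic helpers. A minimal illustration of the two styles, not nebula code:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    // older style: shared pointer, explicit atomic helper functions
    old := new(uint64)
    atomic.AddUint64(old, 1)

    // newer style: the counter is a value type with methods
    var cur atomic.Uint64
    cur.Add(1)

    fmt.Println(atomic.LoadUint64(old), cur.Load())
}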


@@ -1,7 +1,8 @@
package nebula package nebula
import ( import (
"context" "encoding/binary"
"fmt"
"net" "net"
"os" "os"
"os/signal" "os/signal"
@@ -9,9 +10,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header" "golang.org/x/net/ipv4"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
) )
// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
@@ -20,61 +19,43 @@ import (
type Control struct {
f *Interface
l *logrus.Logger
cancel context.CancelFunc
sshStart func()
statsStart func()
dnsStart func()
} }
type ControlHostInfo struct { type ControlHostInfo struct {
VpnIp net.IP `json:"vpnIp"` VpnIP net.IP `json:"vpnIp"`
LocalIndex uint32 `json:"localIndex"` LocalIndex uint32 `json:"localIndex"`
RemoteIndex uint32 `json:"remoteIndex"` RemoteIndex uint32 `json:"remoteIndex"`
RemoteAddrs []*udp.Addr `json:"remoteAddrs"` RemoteAddrs []udpAddr `json:"remoteAddrs"`
CachedPackets int `json:"cachedPackets"` CachedPackets int `json:"cachedPackets"`
Cert *cert.NebulaCertificate `json:"cert"` Cert *cert.NebulaCertificate `json:"cert"`
MessageCounter uint64 `json:"messageCounter"` MessageCounter uint64 `json:"messageCounter"`
CurrentRemote *udp.Addr `json:"currentRemote"` CurrentRemote udpAddr `json:"currentRemote"`
CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"`
CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"`
} }
// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
func (c *Control) Start() {
// Activate the interface
c.f.activate()
// Call all the delayed funcs that waited patiently for the interface to be created.
if c.sshStart != nil {
go c.sshStart()
}
if c.statsStart != nil {
go c.statsStart()
}
if c.dnsStart != nil {
go c.dnsStart()
}
// Start reading packets.
c.f.run()
}
// Stop signals nebula to shutdown, returns after the shutdown is complete
func (c *Control) Stop() { func (c *Control) Stop() {
// Stop the handshakeManager (and other services), to prevent new tunnels from //TODO: stop tun and udp routines, the lock on hostMap effectively does that though
// being created while we're shutting them all down. //TODO: this is probably better as a function in ConnectionManager or HostMap directly
c.cancel() c.f.hostMap.Lock()
for _, h := range c.f.hostMap.Hosts {
c.CloseAllTunnels(false) if h.ConnectionState.ready {
if err := c.f.Close(); err != nil { c.f.send(closeTunnel, 0, h.ConnectionState, h, h.remote, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
c.l.WithError(err).Error("Close interface failed") c.l.WithField("vpnIp", IntIp(h.hostId)).WithField("udpAddr", h.remote).
Debug("Sending close tunnel message")
} }
}
c.f.hostMap.Unlock()
c.l.Info("Goodbye") c.l.Info("Goodbye")
} }
// ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled // ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
func (c *Control) ShutdownBlock() { func (c *Control) ShutdownBlock() {
sigChan := make(chan os.Signal, 1) sigChan := make(chan os.Signal)
signal.Notify(sigChan, syscall.SIGTERM) signal.Notify(sigChan, syscall.SIGTERM)
signal.Notify(sigChan, syscall.SIGINT) signal.Notify(sigChan, syscall.SIGINT)
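
A standalone sketch of the ShutdownBlock pattern above: block until SIGTERM or SIGINT arrives, then run the shutdown path once (the newer code on the left also buffers the channel, which is what the make(chan os.Signal, 1) change reflects). Not nebula code:

package main

import (
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

func main() {
    sigChan := make(chan os.Signal, 1) // buffered, as on the left side of this diff
    signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)

    rawSig := <-sigChan
    fmt.Printf("Caught signal, shutting down: %v\n", rawSig)
    // in nebula this is where Control.Stop() runs
}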
@@ -87,34 +68,31 @@ func (c *Control) ShutdownBlock() {
// RebindUDPServer asks the UDP listener to rebind it's listener. Mainly used on mobile clients when interfaces change // RebindUDPServer asks the UDP listener to rebind it's listener. Mainly used on mobile clients when interfaces change
func (c *Control) RebindUDPServer() { func (c *Control) RebindUDPServer() {
_ = c.f.outside.Rebind() _ = c.f.outside.Rebind()
// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
c.f.lightHouse.SendUpdate(c.f)
// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
c.f.rebindCount++
} }
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip // ListHostmap returns details about the actual or pending (handshaking) hostmap
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo { func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo {
var hm *HostMap
if pendingMap { if pendingMap {
return listHostMapHosts(c.f.handshakeManager.pendingHostMap) hm = c.f.handshakeManager.pendingHostMap
} else { } else {
return listHostMapHosts(c.f.hostMap) hm = c.f.hostMap
} }
hm.RLock()
hosts := make([]ControlHostInfo, len(hm.Hosts))
i := 0
for _, v := range hm.Hosts {
hosts[i] = copyHostInfo(v)
i++
}
hm.RUnlock()
return hosts
} }
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id // GetHostInfoByVpnIP returns a single tunnel's hostInfo, or nil if not found
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo { func (c *Control) GetHostInfoByVpnIP(vpnIP uint32, pending bool) *ControlHostInfo {
if pendingMap {
return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
} else {
return listHostMapIndexes(c.f.hostMap)
}
}
// GetHostInfoByVpnIp returns a single tunnel's hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
var hm *HostMap var hm *HostMap
if pending { if pending {
hm = c.f.handshakeManager.pendingHostMap hm = c.f.handshakeManager.pendingHostMap
@@ -122,40 +100,41 @@ func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlH
hm = c.f.hostMap hm = c.f.hostMap
} }
h, err := hm.QueryVpnIp(vpnIp) h, err := hm.QueryVpnIP(vpnIP)
if err != nil { if err != nil {
return nil return nil
} }
ch := copyHostInfo(h, c.f.hostMap.preferredRanges) ch := copyHostInfo(h)
return &ch return &ch
} }
// SetRemoteForTunnel forces a tunnel to use a specific remote // SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo { func (c *Control) SetRemoteForTunnel(vpnIP uint32, addr udpAddr) *ControlHostInfo {
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp) hostInfo, err := c.f.hostMap.QueryVpnIP(vpnIP)
if err != nil { if err != nil {
return nil return nil
} }
hostInfo.SetRemote(addr.Copy()) hostInfo.SetRemote(addr.Copy())
ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges) ch := copyHostInfo(hostInfo)
return &ch return &ch
} }
// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well. // CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool { func (c *Control) CloseTunnel(vpnIP uint32, localOnly bool) bool {
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp) hostInfo, err := c.f.hostMap.QueryVpnIP(vpnIP)
if err != nil { if err != nil {
return false return false
} }
if !localOnly { if !localOnly {
c.f.send( c.f.send(
header.CloseTunnel, closeTunnel,
0, 0,
hostInfo.ConnectionState, hostInfo.ConnectionState,
hostInfo, hostInfo,
hostInfo.remote,
[]byte{}, []byte{},
make([]byte, 12, 12), make([]byte, 12, 12),
make([]byte, mtu), make([]byte, mtu),
@@ -166,68 +145,15 @@ func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
return true return true
} }
// CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down, optionally you can avoid shutting down lighthouse tunnels func copyHostInfo(h *HostInfo) ControlHostInfo {
// the int returned is a count of tunnels closed addrs := h.RemoteUDPAddrs()
func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
//TODO: this is probably better as a function in ConnectionManager or HostMap directly
lighthouses := c.f.lightHouse.GetLighthouses()
shutdown := func(h *HostInfo) {
if excludeLighthouses {
if _, ok := lighthouses[h.vpnIp]; ok {
return
}
}
c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
c.f.closeTunnel(h)
c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote).
Debug("Sending close tunnel message")
closed++
}
// Learn which hosts are being used as relays, so we can shut them down last.
relayingHosts := map[iputil.VpnIp]*HostInfo{}
// Grab the hostMap lock to access the Relays map
c.f.hostMap.Lock()
for _, relayingHost := range c.f.hostMap.Relays {
relayingHosts[relayingHost.vpnIp] = relayingHost
}
c.f.hostMap.Unlock()
hostInfos := []*HostInfo{}
// Grab the hostMap lock to access the Hosts map
c.f.hostMap.Lock()
for _, relayHost := range c.f.hostMap.Indexes {
if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
hostInfos = append(hostInfos, relayHost)
}
}
c.f.hostMap.Unlock()
for _, h := range hostInfos {
shutdown(h)
}
for _, h := range relayingHosts {
shutdown(h)
}
return
}
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
chi := ControlHostInfo{ chi := ControlHostInfo{
VpnIp: h.vpnIp.ToIP(), VpnIP: int2ip(h.hostId),
LocalIndex: h.localIndexId, LocalIndex: h.localIndexId,
RemoteIndex: h.remoteIndexId, RemoteIndex: h.remoteIndexId,
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges), RemoteAddrs: make([]udpAddr, len(addrs), len(addrs)),
CachedPackets: len(h.packetStore), CachedPackets: len(h.packetStore),
CurrentRelaysToMe: h.relayState.CopyRelayIps(), MessageCounter: *h.ConnectionState.messageCounter,
CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
}
if h.ConnectionState != nil {
chi.MessageCounter = h.ConnectionState.messageCounter.Load()
} }
if c := h.GetCert(); c != nil { if c := h.GetCert(); c != nil {
@@ -235,34 +161,56 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
} }
if h.remote != nil { if h.remote != nil {
chi.CurrentRemote = h.remote.Copy() chi.CurrentRemote = *h.remote
}
for i, addr := range addrs {
chi.RemoteAddrs[i] = addr.Copy()
} }
return chi return chi
} }
func listHostMapHosts(hm *HostMap) []ControlHostInfo { // Hook provides the ability to hook into the network path for a particular
hm.RLock() // message sub type. Any received message of that subtype that is allowed by
hosts := make([]ControlHostInfo, len(hm.Hosts)) // the firewall will be written to the provided write func instead of the
i := 0 // inside interface.
for _, v := range hm.Hosts { // TODO: make this an io.Writer
hosts[i] = copyHostInfo(v, hm.preferredRanges) func (c *Control) Hook(t NebulaMessageSubType, w func([]byte) error) error {
i++ if t == 0 {
return fmt.Errorf("non-default message subtype must be specified")
}
if _, ok := c.f.handlers[Version][message][t]; ok {
return fmt.Errorf("message subtype %d already hooked", t)
} }
hm.RUnlock()
return hosts c.f.handlers[Version][message][t] = c.f.newHook(w)
return nil
} }
func listHostMapIndexes(hm *HostMap) []ControlHostInfo { // Send provides the ability to send arbitrary message packets to peer nodes.
hm.RLock() // The provided payload will be encapsulated in a Nebula Firewall packet
hosts := make([]ControlHostInfo, len(hm.Indexes)) // (IPv4 plus ports) from the node IP to the provided destination nebula IP.
i := 0 // Any protocol handling above layer 3 (IP) must be managed by the caller.
for _, v := range hm.Indexes { func (c *Control) Send(ip uint32, port uint16, st NebulaMessageSubType, payload []byte) {
hosts[i] = copyHostInfo(v, hm.preferredRanges) headerLen := ipv4.HeaderLen + minFwPacketLen
i++ length := headerLen + len(payload)
} packet := make([]byte, length)
hm.RUnlock() packet[0] = 0x45 // IPv4 HL=20
packet[9] = 114 // Declare as arbitrary 0-hop protocol
binary.BigEndian.PutUint16(packet[2:4], uint16(length))
binary.BigEndian.PutUint32(packet[12:16], ip2int(c.f.inside.CidrNet().IP.To4()))
binary.BigEndian.PutUint32(packet[16:20], ip)
return hosts // Set identical values for src and dst port as they're only
// used for nebula firewall rule/conntrack matching.
binary.BigEndian.PutUint16(packet[20:22], port)
binary.BigEndian.PutUint16(packet[22:24], port)
copy(packet[headerLen:], payload)
fp := &FirewallPacket{}
nb := make([]byte, 12)
out := make([]byte, mtu)
c.f.consumeInsidePacket(st, packet, fp, nb, out)
} }
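
The right-hand column of this hunk introduces the Hook/Send custom message-packet API. Below is a minimal caller-side sketch assuming those signatures; the subtype 42, port 4242, and helper name are arbitrary illustrative values, not anything from the source.

```go
package custommsg

import (
	"encoding/binary"
	"fmt"
	"log"
	"net"

	"github.com/slackhq/nebula"
)

// customSubType is an arbitrary illustrative value; Hook rejects the default
// subtype 0, so any otherwise-unused non-zero subtype works.
const customSubType = 42

// wireCustomChannel is a hypothetical caller-side helper, not part of the tree.
func wireCustomChannel(ctrl *nebula.Control, peer net.IP) error {
	// Receive path: message packets of our subtype that pass the firewall are
	// handed to this callback instead of being written to the inside (tun) device.
	if err := ctrl.Hook(customSubType, func(payload []byte) error {
		log.Printf("received %d bytes of custom payload", len(payload))
		return nil
	}); err != nil {
		return err
	}

	v4 := peer.To4()
	if v4 == nil {
		return fmt.Errorf("peer %s is not an IPv4 nebula address", peer)
	}

	// Send path: the payload is wrapped in a minimal IPv4 header (protocol 114)
	// from this node's nebula IP to the peer, with identical src/dst ports so the
	// receiver's firewall rules and conntrack can match the flow.
	ctrl.Send(binary.BigEndian.Uint32(v4), 4242, customSubType, []byte("hello"))
	return nil
}
```

As the comment in the hunk notes, the port values carry no transport semantics here; they exist only so nebula firewall rules and conntrack entries on the receiving side have something to match.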

View File

@@ -8,26 +8,23 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/util"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestControl_GetHostInfoByVpnIp(t *testing.T) { func TestControl_GetHostInfoByVpnIP(t *testing.T) {
l := test.NewLogger()
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object // Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
// To properly ensure we are not exposing core memory to the caller // To properly ensure we are not exposing core memory to the caller
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0)) hm := NewHostMap("test", &net.IPNet{}, make([]*net.IPNet, 0))
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444) remote1 := NewUDPAddr(100, 4444)
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444) remote2 := NewUDPAddr(101, 4444)
ipNet := net.IPNet{ ipNet := net.IPNet{
IP: net.IPv4(1, 2, 3, 4), IP: net.IPv4(1, 2, 3, 4),
Mask: net.IPMask{255, 255, 255, 0}, Mask: net.IPMask{255, 255, 255, 0},
} }
ipNet2 := net.IPNet{ ipNet2 := net.IPNet{
IP: net.ParseIP("1:2:3:4:5:6:7:8"), IP: net.IPv4(1, 2, 3, 5),
Mask: net.IPMask{255, 255, 255, 0}, Mask: net.IPMask{255, 255, 255, 0},
} }
@@ -46,40 +43,31 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
}, },
Signature: []byte{1, 2, 1, 2, 1, 3}, Signature: []byte{1, 2, 1, 2, 1, 3},
} }
counter := uint64(0)
remotes := NewRemoteList(nil) remotes := []*HostInfoDest{NewHostInfoDest(remote1), NewHostInfoDest(remote2)}
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port))) hm.Add(ip2int(ipNet.IP), &HostInfo{
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
remote: remote1, remote: remote1,
remotes: remotes, Remotes: remotes,
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: crt, peerCert: crt,
messageCounter: &counter,
}, },
remoteIndexId: 200, remoteIndexId: 200,
localIndexId: 201, localIndexId: 201,
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}) })
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{ hm.Add(ip2int(ipNet2.IP), &HostInfo{
remote: remote1, remote: remote1,
remotes: remotes, Remotes: remotes,
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: nil, peerCert: nil,
messageCounter: &counter,
}, },
remoteIndexId: 200, remoteIndexId: 200,
localIndexId: 201, localIndexId: 201,
vpnIp: iputil.Ip2VpnIp(ipNet2.IP), hostId: ip2int(ipNet2.IP),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}) })
c := Control{ c := Control{
@@ -89,28 +77,26 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l: logrus.New(), l: logrus.New(),
} }
thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false) thi := c.GetHostInfoByVpnIP(ip2int(ipNet.IP), false)
expectedInfo := ControlHostInfo{ expectedInfo := ControlHostInfo{
VpnIp: net.IPv4(1, 2, 3, 4).To4(), VpnIP: net.IPv4(1, 2, 3, 4).To4(),
LocalIndex: 201, LocalIndex: 201,
RemoteIndex: 200, RemoteIndex: 200,
RemoteAddrs: []*udp.Addr{remote2, remote1}, RemoteAddrs: []udpAddr{*remote1, *remote2},
CachedPackets: 0, CachedPackets: 0,
Cert: crt.Copy(), Cert: crt.Copy(),
MessageCounter: 0, MessageCounter: 0,
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444), CurrentRemote: *NewUDPAddr(100, 4444),
CurrentRelaysToMe: []iputil.VpnIp{},
CurrentRelaysThroughMe: []iputil.VpnIp{},
} }
// Make sure we don't have any unexpected fields // Make sure we don't have any unexpected fields
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi) assertFields(t, []string{"VpnIP", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote"}, thi)
test.AssertDeepCopyEqual(t, &expectedInfo, thi) util.AssertDeepCopyEqual(t, &expectedInfo, thi)
// Make sure we don't panic if the host info doesn't have a cert yet // Make sure we don't panic if the host info doesn't have a cert yet
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false) thi = c.GetHostInfoByVpnIP(ip2int(ipNet2.IP), false)
}) })
} }
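
The hunks above convert between `net.IP` and the `uint32` hostId/VpnIP representation via unexported helpers (`ip2int`, `int2ip`). A rough sketch of those conversions, under the assumption that a VPN IP is simply the big-endian value of the IPv4 address (the in-tree versions may differ in edge-case handling):

```go
package iphelpers

import (
	"encoding/binary"
	"net"
)

// Rough equivalents (assumed, not the in-tree code) of the unexported
// ip2int/int2ip helpers: a VPN IP is stored as the big-endian uint32 value of
// the IPv4 address, so hostId and VpnIP round-trip cleanly.

func ip2int(ip net.IP) uint32 {
	if v4 := ip.To4(); v4 != nil {
		return binary.BigEndian.Uint32(v4)
	}
	return 0 // anything that is not IPv4 cannot be a 4-byte VPN IP
}

func int2ip(n uint32) net.IP {
	ip := make(net.IP, net.IPv4len)
	binary.BigEndian.PutUint32(ip, n)
	return ip
}
```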

View File

@@ -1,179 +0,0 @@
//go:build e2e_testing
// +build e2e_testing
package nebula
import (
"net"
"github.com/slackhq/nebula/cert"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp"
)
// WaitForType will pipe all messages from this control device into the pipeTo control device
// returning after a message matching the criteria has been piped
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{}
for {
p := c.f.outside.Get(true)
if err := h.Parse(p.Data); err != nil {
panic(err)
}
pipeTo.InjectUDPPacket(p)
if h.Type == msgType && h.Subtype == subType {
return
}
}
}
// WaitForTypeByIndex is similar to WaitForType except it adds an index check
// Useful if you have many nodes communicating and want to wait to find a specific node's packet
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{}
for {
p := c.f.outside.Get(true)
if err := h.Parse(p.Data); err != nil {
panic(err)
}
pipeTo.InjectUDPPacket(p)
if h.RemoteIndex == toIndex && h.Type == msgType && h.Subtype == subType {
return
}
}
}
// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
// This is necessary if you did not configure static hosts or are not running a lighthouse
func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) {
c.f.lightHouse.Lock()
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
remoteList.Lock()
defer remoteList.Unlock()
c.f.lightHouse.Unlock()
iVpnIp := iputil.Ip2VpnIp(vpnIp)
if v4 := toAddr.IP.To4(); v4 != nil {
remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
} else {
remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port)))
}
}
// InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp
// This is necessary to inform an initiator of possible relays for communicating with a responder
func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) {
c.f.lightHouse.Lock()
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
remoteList.Lock()
defer remoteList.Unlock()
c.f.lightHouse.Unlock()
iVpnIp := iputil.Ip2VpnIp(vpnIp)
uVpnIp := []uint32{}
for _, rVPnIp := range relayVpnIps {
uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp)))
}
remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp)
}
// GetFromTun will pull a packet off the tun side of nebula
func (c *Control) GetFromTun(block bool) []byte {
return c.f.inside.(*overlay.TestTun).Get(block)
}
// GetFromUDP will pull a udp packet off the udp side of nebula
func (c *Control) GetFromUDP(block bool) *udp.Packet {
return c.f.outside.Get(block)
}
func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
return c.f.outside.TxPackets
}
func (c *Control) GetTunTxChan() <-chan []byte {
return c.f.inside.(*overlay.TestTun).TxPackets
}
// InjectUDPPacket will inject a packet into the udp side of nebula
func (c *Control) InjectUDPPacket(p *udp.Packet) {
c.f.outside.Send(p)
}
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) {
ip := layers.IPv4{
Version: 4,
TTL: 64,
Protocol: layers.IPProtocolUDP,
SrcIP: c.f.inside.Cidr().IP,
DstIP: toIp,
}
udp := layers.UDP{
SrcPort: layers.UDPPort(fromPort),
DstPort: layers.UDPPort(toPort),
}
err := udp.SetNetworkLayerForChecksum(&ip)
if err != nil {
panic(err)
}
buffer := gopacket.NewSerializeBuffer()
opt := gopacket.SerializeOptions{
ComputeChecksums: true,
FixLengths: true,
}
err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload(data))
if err != nil {
panic(err)
}
c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
}
func (c *Control) GetVpnIp() iputil.VpnIp {
return c.f.myVpnIp
}
func (c *Control) GetUDPAddr() string {
return c.f.outside.Addr.String()
}
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
if !ok {
return false
}
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
return true
}
func (c *Control) GetHostmap() *HostMap {
return c.f.hostMap
}
func (c *Control) GetCert() *cert.NebulaCertificate {
return c.f.certState.Load().certificate
}
func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo)
ixHandshakeStage0(c.f, vpnIp, hostinfo)
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
select {
case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
default:
}
}
}

View File

@@ -1,10 +1,12 @@
[Unit] [Unit]
Description=Nebula overlay networking tool Description=nebula
Wants=basic.target network-online.target nss-lookup.target time-sync.target Wants=basic.target network-online.target
After=basic.target network.target network-online.target After=basic.target network.target network-online.target
[Service] [Service]
SyslogIdentifier=nebula SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always Restart=always

View File

@@ -1,14 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service
[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,84 +0,0 @@
Prebuilt Binaries License
-------------------------
1. DEFINITIONS. "Software" means the precise contents of the "wintun.dll"
files that are included in the .zip file that contains this document as
downloaded from wintun.net/builds.
2. LICENSE GRANT. WireGuard LLC grants to you a non-exclusive and
non-transferable right to use Software for lawful purposes under certain
obligations and limited rights as set forth in this agreement.
3. RESTRICTIONS. Software is owned and copyrighted by WireGuard LLC. It is
licensed, not sold. Title to Software and all associated intellectual
property rights are retained by WireGuard. You must not:
a. reverse engineer, decompile, disassemble, extract from, or otherwise
modify the Software;
b. modify or create derivative work based upon Software in whole or in
parts, except insofar as only the API interfaces of the "wintun.h" file
distributed alongside the Software (the "Permitted API") are used;
c. remove any proprietary notices, labels, or copyrights from the Software;
d. resell, redistribute, lease, rent, transfer, sublicense, or otherwise
transfer rights of the Software without the prior written consent of
WireGuard LLC, except insofar as the Software is distributed alongside
other software that uses the Software only via the Permitted API;
e. use the name of WireGuard LLC, the WireGuard project, the Wintun
project, or the names of its contributors to endorse or promote products
derived from the Software without specific prior written consent.
4. LIMITED WARRANTY. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF
ANY KIND. WIREGUARD LLC HEREBY EXCLUDES AND DISCLAIMS ALL IMPLIED OR
STATUTORY WARRANTIES, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE, QUALITY, NON-INFRINGEMENT, TITLE, RESULTS,
EFFORTS, OR QUIET ENJOYMENT. THERE IS NO WARRANTY THAT THE PRODUCT WILL BE
ERROR-FREE OR WILL FUNCTION WITHOUT INTERRUPTION. YOU ASSUME THE ENTIRE
RISK FOR THE RESULTS OBTAINED USING THE PRODUCT. TO THE EXTENT THAT
WIREGUARD LLC MAY NOT DISCLAIM ANY WARRANTY AS A MATTER OF APPLICABLE LAW,
THE SCOPE AND DURATION OF SUCH WARRANTY WILL BE THE MINIMUM PERMITTED UNDER
SUCH LAW. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE OR NON-INFRINGEMENT ARE DISCLAIMED, EXCEPT TO THE
EXTENT THAT THESE DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
5. LIMITATION OF LIABILITY. To the extent not prohibited by law, in no event
WireGuard LLC or any third-party-developer will be liable for any lost
revenue, profit or data or for special, indirect, consequential, incidental
or punitive damages, however caused regardless of the theory of liability,
arising out of or related to the use of or inability to use Software, even
if WireGuard LLC has been advised of the possibility of such damages.
Solely you are responsible for determining the appropriateness of using
Software and accept full responsibility for all risks associated with its
exercise of rights under this agreement, including but not limited to the
risks and costs of program errors, compliance with applicable laws, damage
to or loss of data, programs or equipment, and unavailability or
interruption of operations. The foregoing limitations will apply even if
the above stated warranty fails of its essential purpose. You acknowledge,
that it is in the nature of software that software is complex and not
completely free of errors. In no event shall WireGuard LLC or any
third-party-developer be liable to you under any theory for any damages
suffered by you or any user of Software or for any special, incidental,
indirect, consequential or similar damages (including without limitation
damages for loss of business profits, business interruption, loss of
business information or any other pecuniary loss) arising out of the use or
inability to use Software, even if WireGuard LLC has been advised of the
possibility of such damages and regardless of the legal or quitable theory
(contract, tort, or otherwise) upon which the claim is based.
6. TERMINATION. This agreement is affected until terminated. You may
terminate this agreement at any time. This agreement will terminate
immediately without notice from WireGuard LLC if you fail to comply with
the terms and conditions of this agreement. Upon termination, you must
delete Software and all copies of Software and cease all forms of
distribution of Software.
7. SEVERABILITY. If any provision of this agreement is held to be
unenforceable, this agreement will remain in effect with the provision
omitted, unless omission would frustrate the intent of the parties, in
which case this agreement will immediately terminate.
8. RESERVATION OF RIGHTS. All rights not expressly granted in this agreement
are reserved by WireGuard LLC. For example, WireGuard LLC reserves the
right at any time to cease development of Software, to alter distribution
details, features, specifications, capabilities, functions, licensing
terms, release dates, APIs, ABIs, general availability, or other
characteristics of the Software.

View File

@@ -1,339 +0,0 @@
# [Wintun Network Adapter](https://www.wintun.net/)
### TUN Device Driver for Windows
This is a layer 3 TUN driver for Windows 7, 8, 8.1, and 10. Originally created for [WireGuard](https://www.wireguard.com/), it is intended to be useful to a wide variety of projects that require layer 3 tunneling devices with implementations primarily in userspace.
## Installation
Wintun is deployed as a platform-specific `wintun.dll` file. Install the `wintun.dll` file side-by-side with your application. Download the dll from [wintun.net](https://www.wintun.net/), alongside the header file for your application described below.
## Usage
Include the [`wintun.h` file](https://git.zx2c4.com/wintun/tree/api/wintun.h) in your project simply by copying it there and dynamically load the `wintun.dll` using [`LoadLibraryEx()`](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexa) and [`GetProcAddress()`](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getprocaddress) to resolve each function, using the typedefs provided in the header file. The [`InitializeWintun` function in the example.c code](https://git.zx2c4.com/wintun/tree/example/example.c) provides this in a function that you can simply copy and paste.
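
For a Go consumer, a rough equivalent of that `LoadLibraryEx()`/`GetProcAddress()` flow can be built on golang.org/x/sys/windows. The sketch below only covers loading the DLL and wrapping a single call; the package and wrapper names are assumptions for illustration, not Wintun's or nebula's code.

```go
//go:build windows

package wintunload

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// wintun.dll ships side-by-side with the application, so it is loaded by name
// and each export is resolved lazily, mirroring LoadLibraryEx/GetProcAddress.
var (
	wintunDLL           = windows.NewLazyDLL("wintun.dll")
	wintunCreateAdapter = wintunDLL.NewProc("WintunCreateAdapter")
)

// createAdapter wraps WintunCreateAdapter; a zero handle means failure and the
// returned syscall error carries GetLastError. (Wrapper shape is an assumption.)
func createAdapter(name, tunnelType string) (uintptr, error) {
	name16, err := windows.UTF16PtrFromString(name)
	if err != nil {
		return 0, err
	}
	type16, err := windows.UTF16PtrFromString(tunnelType)
	if err != nil {
		return 0, err
	}
	// The third argument is the optional requested GUID; 0 (NULL) lets the
	// system choose one at random.
	h, _, callErr := wintunCreateAdapter.Call(
		uintptr(unsafe.Pointer(name16)),
		uintptr(unsafe.Pointer(type16)),
		0,
	)
	if h == 0 {
		return 0, callErr
	}
	return h, nil
}
```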
With the library setup, Wintun can then be used by first creating an adapter, configuring it, and then setting its status to "up". Adapters have names (e.g. "OfficeNet") and types (e.g. "Wintun").
```C
WINTUN_ADAPTER_HANDLE Adapter1 = WintunCreateAdapter(L"OfficeNet", L"Wintun", &SomeFixedGUID1);
WINTUN_ADAPTER_HANDLE Adapter2 = WintunCreateAdapter(L"HomeNet", L"Wintun", &SomeFixedGUID2);
WINTUN_ADAPTER_HANDLE Adapter3 = WintunCreateAdapter(L"Data Center", L"Wintun", &SomeFixedGUID3);
```
After creating an adapter, we can use it by starting a session:
```C
WINTUN_SESSION_HANDLE Session = WintunStartSession(Adapter2, 0x400000);
```
Then, the `WintunAllocateSendPacket` and `WintunSendPacket` functions can be used for sending packets ([used by `SendPackets` in the example.c code](https://git.zx2c4.com/wintun/tree/example/example.c)):
```C
BYTE *OutgoingPacket = WintunAllocateSendPacket(Session, PacketDataSize);
if (OutgoingPacket)
{
memcpy(OutgoingPacket, PacketData, PacketDataSize);
WintunSendPacket(Session, OutgoingPacket);
}
else if (GetLastError() != ERROR_BUFFER_OVERFLOW) // Silently drop packets if the ring is full
Log(L"Packet write failed");
```
And the `WintunReceivePacket` and `WintunReleaseReceivePacket` functions can be used for receiving packets ([used by `ReceivePackets` in the example.c code](https://git.zx2c4.com/wintun/tree/example/example.c)):
```C
for (;;)
{
DWORD IncomingPacketSize;
BYTE *IncomingPacket = WintunReceivePacket(Session, &IncomingPacketSize);
if (IncomingPacket)
{
DoSomethingWithPacket(IncomingPacket, IncomingPacketSize);
WintunReleaseReceivePacket(Session, IncomingPacket);
}
else if (GetLastError() == ERROR_NO_MORE_ITEMS)
WaitForSingleObject(WintunGetReadWaitEvent(Session), INFINITE);
else
{
Log(L"Packet read failed");
break;
}
}
```
Some high performance use cases may want to spin on `WintunReceivePackets` for a number of cycles before falling back to waiting on the read-wait event.
You are **highly encouraged** to read the [**example.c short example**](https://git.zx2c4.com/wintun/tree/example/example.c) to see how to put together a simple userspace network tunnel.
The various functions and definitions are [documented in the reference below](#Reference).
## Reference
### Macro Definitions
#### WINTUN\_MAX\_POOL
`#define WINTUN_MAX_POOL 256`
Maximum pool name length including zero terminator
#### WINTUN\_MIN\_RING\_CAPACITY
`#define WINTUN_MIN_RING_CAPACITY 0x20000 /* 128kiB */`
Minimum ring capacity.
#### WINTUN\_MAX\_RING\_CAPACITY
`#define WINTUN_MAX_RING_CAPACITY 0x4000000 /* 64MiB */`
Maximum ring capacity.
#### WINTUN\_MAX\_IP\_PACKET\_SIZE
`#define WINTUN_MAX_IP_PACKET_SIZE 0xFFFF`
Maximum IP packet size
### Typedefs
#### WINTUN\_ADAPTER\_HANDLE
`typedef void* WINTUN_ADAPTER_HANDLE`
A handle representing Wintun adapter
#### WINTUN\_ENUM\_CALLBACK
`typedef BOOL(* WINTUN_ENUM_CALLBACK) (WINTUN_ADAPTER_HANDLE Adapter, LPARAM Param)`
Called by WintunEnumAdapters for each adapter in the pool.
**Parameters**
- *Adapter*: Adapter handle, which will be freed when this function returns.
- *Param*: An application-defined value passed to the WintunEnumAdapters.
**Returns**
Non-zero to continue iterating adapters; zero to stop.
#### WINTUN\_LOGGER\_CALLBACK
`typedef void(* WINTUN_LOGGER_CALLBACK) (WINTUN_LOGGER_LEVEL Level, DWORD64 Timestamp, const WCHAR *Message)`
Called by internal logger to report diagnostic messages
**Parameters**
- *Level*: Message level.
- *Timestamp*: Message timestamp in in 100ns intervals since 1601-01-01 UTC.
- *Message*: Message text.
#### WINTUN\_SESSION\_HANDLE
`typedef void* WINTUN_SESSION_HANDLE`
A handle representing Wintun session
### Enumeration Types
#### WINTUN\_LOGGER\_LEVEL
`enum WINTUN_LOGGER_LEVEL`
Determines the level of logging, passed to WINTUN\_LOGGER\_CALLBACK.
- *WINTUN\_LOG\_INFO*: Informational
- *WINTUN\_LOG\_WARN*: Warning
- *WINTUN\_LOG\_ERR*: Error
Enumerator
### Functions
#### WintunCreateAdapter()
`WINTUN_ADAPTER_HANDLE WintunCreateAdapter (const WCHAR * Name, const WCHAR * TunnelType, const GUID * RequestedGUID)`
Creates a new Wintun adapter.
**Parameters**
- *Name*: The requested name of the adapter. Zero-terminated string of up to MAX\_ADAPTER\_NAME-1 characters.
- *Name*: Name of the adapter tunnel type. Zero-terminated string of up to MAX\_ADAPTER\_NAME-1 characters.
- *RequestedGUID*: The GUID of the created network adapter, which then influences NLA generation deterministically. If it is set to NULL, the GUID is chosen by the system at random, and hence a new NLA entry is created for each new adapter. It is called "requested" GUID because the API it uses is completely undocumented, and so there could be minor interesting complications with its usage.
**Returns**
If the function succeeds, the return value is the adapter handle. Must be released with WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call GetLastError.
#### WintunOpenAdapter()
`WINTUN_ADAPTER_HANDLE WintunOpenAdapter (const WCHAR * Name)`
Opens an existing Wintun adapter.
**Parameters**
- *Name*: The requested name of the adapter. Zero-terminated string of up to MAX\_ADAPTER\_NAME-1 characters.
**Returns**
If the function succeeds, the return value is adapter handle. Must be released with WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call GetLastError.
#### WintunCloseAdapter()
`void WintunCloseAdapter (WINTUN_ADAPTER_HANDLE Adapter)`
Releases Wintun adapter resources and, if adapter was created with WintunCreateAdapter, removes adapter.
**Parameters**
- *Adapter*: Adapter handle obtained with WintunCreateAdapter or WintunOpenAdapter.
#### WintunDeleteDriver()
`BOOL WintunDeleteDriver ()`
Deletes the Wintun driver if there are no more adapters in use.
**Returns**
If the function succeeds, the return value is nonzero. If the function fails, the return value is zero. To get extended error information, call GetLastError.
#### WintunGetAdapterLuid()
`void WintunGetAdapterLuid (WINTUN_ADAPTER_HANDLE Adapter, NET_LUID * Luid)`
Returns the LUID of the adapter.
**Parameters**
- *Adapter*: Adapter handle obtained with WintunOpenAdapter or WintunCreateAdapter
- *Luid*: Pointer to LUID to receive adapter LUID.
#### WintunGetRunningDriverVersion()
`DWORD WintunGetRunningDriverVersion (void )`
Determines the version of the Wintun driver currently loaded.
**Returns**
If the function succeeds, the return value is the version number. If the function fails, the return value is zero. To get extended error information, call GetLastError. Possible errors include the following: ERROR\_FILE\_NOT\_FOUND Wintun not loaded
#### WintunSetLogger()
`void WintunSetLogger (WINTUN_LOGGER_CALLBACK NewLogger)`
Sets logger callback function.
**Parameters**
- *NewLogger*: Pointer to callback function to use as a new global logger. NewLogger may be called from various threads concurrently. Should the logging require serialization, you must handle serialization in NewLogger. Set to NULL to disable.
#### WintunStartSession()
`WINTUN_SESSION_HANDLE WintunStartSession (WINTUN_ADAPTER_HANDLE Adapter, DWORD Capacity)`
Starts Wintun session.
**Parameters**
- *Adapter*: Adapter handle obtained with WintunOpenAdapter or WintunCreateAdapter
- *Capacity*: Rings capacity. Must be between WINTUN\_MIN\_RING\_CAPACITY and WINTUN\_MAX\_RING\_CAPACITY (incl.) Must be a power of two.
**Returns**
Wintun session handle. Must be released with WintunEndSession. If the function fails, the return value is NULL. To get extended error information, call GetLastError.
#### WintunEndSession()
`void WintunEndSession (WINTUN_SESSION_HANDLE Session)`
Ends Wintun session.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
#### WintunGetReadWaitEvent()
`HANDLE WintunGetReadWaitEvent (WINTUN_SESSION_HANDLE Session)`
Gets Wintun session's read-wait event handle.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
**Returns**
Pointer to receive event handle to wait for available data when reading. Should WintunReceivePackets return ERROR\_NO\_MORE\_ITEMS (after spinning on it for a while under heavy load), wait for this event to become signaled before retrying WintunReceivePackets. Do not call CloseHandle on this event - it is managed by the session.
#### WintunReceivePacket()
`BYTE* WintunReceivePacket (WINTUN_SESSION_HANDLE Session, DWORD * PacketSize)`
Retrieves one or packet. After the packet content is consumed, call WintunReleaseReceivePacket with Packet returned from this function to release internal buffer. This function is thread-safe.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *PacketSize*: Pointer to receive packet size.
**Returns**
Pointer to layer 3 IPv4 or IPv6 packet. Client may modify its content at will. If the function fails, the return value is NULL. To get extended error information, call GetLastError. Possible errors include the following: ERROR\_HANDLE\_EOF Wintun adapter is terminating; ERROR\_NO\_MORE\_ITEMS Wintun buffer is exhausted; ERROR\_INVALID\_DATA Wintun buffer is corrupt
#### WintunReleaseReceivePacket()
`void WintunReleaseReceivePacket (WINTUN_SESSION_HANDLE Session, const BYTE * Packet)`
Releases internal buffer after the received packet has been processed by the client. This function is thread-safe.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *Packet*: Packet obtained with WintunReceivePacket
#### WintunAllocateSendPacket()
`BYTE* WintunAllocateSendPacket (WINTUN_SESSION_HANDLE Session, DWORD PacketSize)`
Allocates memory for a packet to send. After the memory is filled with packet data, call WintunSendPacket to send and release internal buffer. WintunAllocateSendPacket is thread-safe and the WintunAllocateSendPacket order of calls define the packet sending order.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *PacketSize*: Exact packet size. Must be less or equal to WINTUN\_MAX\_IP\_PACKET\_SIZE.
**Returns**
Returns pointer to memory where to prepare layer 3 IPv4 or IPv6 packet for sending. If the function fails, the return value is NULL. To get extended error information, call GetLastError. Possible errors include the following: ERROR\_HANDLE\_EOF Wintun adapter is terminating; ERROR\_BUFFER\_OVERFLOW Wintun buffer is full;
#### WintunSendPacket()
`void WintunSendPacket (WINTUN_SESSION_HANDLE Session, const BYTE * Packet)`
Sends the packet and releases internal buffer. WintunSendPacket is thread-safe, but the WintunAllocateSendPacket order of calls define the packet sending order. This means the packet is not guaranteed to be sent in the WintunSendPacket yet.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *Packet*: Packet obtained with WintunAllocateSendPacket
## Building
**Do not distribute drivers or files named "Wintun", as they will most certainly clash with official deployments. Instead distribute [`wintun.dll` as downloaded from wintun.net](https://www.wintun.net).**
General requirements:
- [Visual Studio 2019](https://visualstudio.microsoft.com/downloads/) with Windows SDK
- [Windows Driver Kit](https://docs.microsoft.com/en-us/windows-hardware/drivers/download-the-wdk)
`wintun.sln` may be opened in Visual Studio for development and building. Be sure to run `bcdedit /set testsigning on` and then reboot before to enable unsigned driver loading. The default run sequence (F5) in Visual Studio will build the example project and its dependencies.
## License
The entire contents of [the repository](https://git.zx2c4.com/wintun/), including all documentation and example code, is "Copyright © 2018-2021 WireGuard LLC. All Rights Reserved." Source code is licensed under the [GPLv2](COPYING). Prebuilt binaries from [wintun.net](https://www.wintun.net/) are released under a more permissive license suitable for more forms of software contained inside of the .zip files distributed there.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,270 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Copyright (C) 2018-2021 WireGuard LLC. All Rights Reserved.
*/
#pragma once
#include <winsock2.h>
#include <windows.h>
#include <ipexport.h>
#include <ifdef.h>
#include <ws2ipdef.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef ALIGNED
# if defined(_MSC_VER)
# define ALIGNED(n) __declspec(align(n))
# elif defined(__GNUC__)
# define ALIGNED(n) __attribute__((aligned(n)))
# else
# error "Unable to define ALIGNED"
# endif
#endif
/* MinGW is missing this one, unfortunately. */
#ifndef _Post_maybenull_
# define _Post_maybenull_
#endif
#pragma warning(push)
#pragma warning(disable : 4324) /* structure was padded due to alignment specifier */
/**
* A handle representing Wintun adapter
*/
typedef struct _WINTUN_ADAPTER *WINTUN_ADAPTER_HANDLE;
/**
* Creates a new Wintun adapter.
*
* @param Name The requested name of the adapter. Zero-terminated string of up to MAX_ADAPTER_NAME-1
* characters.
*
* @param TunnelType Name of the adapter tunnel type. Zero-terminated string of up to MAX_ADAPTER_NAME-1
* characters.
*
* @param RequestedGUID The GUID of the created network adapter, which then influences NLA generation deterministically.
* If it is set to NULL, the GUID is chosen by the system at random, and hence a new NLA entry is
* created for each new adapter. It is called "requested" GUID because the API it uses is
* completely undocumented, and so there could be minor interesting complications with its usage.
*
* @return If the function succeeds, the return value is the adapter handle. Must be released with
* WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call
* GetLastError.
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
WINTUN_ADAPTER_HANDLE(WINAPI WINTUN_CREATE_ADAPTER_FUNC)
(_In_z_ LPCWSTR Name, _In_z_ LPCWSTR TunnelType, _In_opt_ const GUID *RequestedGUID);
/**
* Opens an existing Wintun adapter.
*
* @param Name The requested name of the adapter. Zero-terminated string of up to MAX_ADAPTER_NAME-1
* characters.
*
* @return If the function succeeds, the return value is the adapter handle. Must be released with
* WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call
* GetLastError.
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
WINTUN_ADAPTER_HANDLE(WINAPI WINTUN_OPEN_ADAPTER_FUNC)(_In_z_ LPCWSTR Name);
/**
* Releases Wintun adapter resources and, if adapter was created with WintunCreateAdapter, removes adapter.
*
* @param Adapter Adapter handle obtained with WintunCreateAdapter or WintunOpenAdapter.
*/
typedef VOID(WINAPI WINTUN_CLOSE_ADAPTER_FUNC)(_In_opt_ WINTUN_ADAPTER_HANDLE Adapter);
/**
* Deletes the Wintun driver if there are no more adapters in use.
*
* @return If the function succeeds, the return value is nonzero. If the function fails, the return value is zero. To
* get extended error information, call GetLastError.
*/
typedef _Return_type_success_(return != FALSE)
BOOL(WINAPI WINTUN_DELETE_DRIVER_FUNC)(VOID);
/**
* Returns the LUID of the adapter.
*
* @param Adapter Adapter handle obtained with WintunCreateAdapter or WintunOpenAdapter
*
* @param Luid Pointer to LUID to receive adapter LUID.
*/
typedef VOID(WINAPI WINTUN_GET_ADAPTER_LUID_FUNC)(_In_ WINTUN_ADAPTER_HANDLE Adapter, _Out_ NET_LUID *Luid);
/**
* Determines the version of the Wintun driver currently loaded.
*
* @return If the function succeeds, the return value is the version number. If the function fails, the return value is
* zero. To get extended error information, call GetLastError. Possible errors include the following:
* ERROR_FILE_NOT_FOUND Wintun not loaded
*/
typedef _Return_type_success_(return != 0)
DWORD(WINAPI WINTUN_GET_RUNNING_DRIVER_VERSION_FUNC)(VOID);
/**
* Determines the level of logging, passed to WINTUN_LOGGER_CALLBACK.
*/
typedef enum
{
WINTUN_LOG_INFO, /**< Informational */
WINTUN_LOG_WARN, /**< Warning */
WINTUN_LOG_ERR /**< Error */
} WINTUN_LOGGER_LEVEL;
/**
* Called by internal logger to report diagnostic messages
*
* @param Level Message level.
*
* @param Timestamp Message timestamp in in 100ns intervals since 1601-01-01 UTC.
*
* @param Message Message text.
*/
typedef VOID(CALLBACK *WINTUN_LOGGER_CALLBACK)(
_In_ WINTUN_LOGGER_LEVEL Level,
_In_ DWORD64 Timestamp,
_In_z_ LPCWSTR Message);
/**
* Sets logger callback function.
*
* @param NewLogger Pointer to callback function to use as a new global logger. NewLogger may be called from various
* threads concurrently. Should the logging require serialization, you must handle serialization in
* NewLogger. Set to NULL to disable.
*/
typedef VOID(WINAPI WINTUN_SET_LOGGER_FUNC)(_In_ WINTUN_LOGGER_CALLBACK NewLogger);
/**
* Minimum ring capacity.
*/
#define WINTUN_MIN_RING_CAPACITY 0x20000 /* 128kiB */
/**
* Maximum ring capacity.
*/
#define WINTUN_MAX_RING_CAPACITY 0x4000000 /* 64MiB */
/**
* A handle representing Wintun session
*/
typedef struct _TUN_SESSION *WINTUN_SESSION_HANDLE;
/**
* Starts Wintun session.
*
* @param Adapter Adapter handle obtained with WintunOpenAdapter or WintunCreateAdapter
*
* @param Capacity Rings capacity. Must be between WINTUN_MIN_RING_CAPACITY and WINTUN_MAX_RING_CAPACITY (incl.)
* Must be a power of two.
*
* @return Wintun session handle. Must be released with WintunEndSession. If the function fails, the return value is
* NULL. To get extended error information, call GetLastError.
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
WINTUN_SESSION_HANDLE(WINAPI WINTUN_START_SESSION_FUNC)(_In_ WINTUN_ADAPTER_HANDLE Adapter, _In_ DWORD Capacity);
/**
* Ends Wintun session.
*
* @param Session Wintun session handle obtained with WintunStartSession
*/
typedef VOID(WINAPI WINTUN_END_SESSION_FUNC)(_In_ WINTUN_SESSION_HANDLE Session);
/**
* Gets Wintun session's read-wait event handle.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @return Pointer to receive event handle to wait for available data when reading. Should
* WintunReceivePackets return ERROR_NO_MORE_ITEMS (after spinning on it for a while under heavy
* load), wait for this event to become signaled before retrying WintunReceivePackets. Do not call
* CloseHandle on this event - it is managed by the session.
*/
typedef HANDLE(WINAPI WINTUN_GET_READ_WAIT_EVENT_FUNC)(_In_ WINTUN_SESSION_HANDLE Session);
/**
* Maximum IP packet size
*/
#define WINTUN_MAX_IP_PACKET_SIZE 0xFFFF
/**
* Retrieves one or packet. After the packet content is consumed, call WintunReleaseReceivePacket with Packet returned
* from this function to release internal buffer. This function is thread-safe.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param PacketSize Pointer to receive packet size.
*
* @return Pointer to layer 3 IPv4 or IPv6 packet. Client may modify its content at will. If the function fails, the
* return value is NULL. To get extended error information, call GetLastError. Possible errors include the
* following:
* ERROR_HANDLE_EOF Wintun adapter is terminating;
* ERROR_NO_MORE_ITEMS Wintun buffer is exhausted;
* ERROR_INVALID_DATA Wintun buffer is corrupt
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
_Post_writable_byte_size_(*PacketSize)
BYTE *(WINAPI WINTUN_RECEIVE_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _Out_ DWORD *PacketSize);
/**
* Releases internal buffer after the received packet has been processed by the client. This function is thread-safe.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param Packet Packet obtained with WintunReceivePacket
*/
typedef VOID(
WINAPI WINTUN_RELEASE_RECEIVE_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _In_ const BYTE *Packet);
/**
* Allocates memory for a packet to send. After the memory is filled with packet data, call WintunSendPacket to send
* and release internal buffer. WintunAllocateSendPacket is thread-safe and the WintunAllocateSendPacket order of
* calls define the packet sending order.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param PacketSize Exact packet size. Must be less or equal to WINTUN_MAX_IP_PACKET_SIZE.
*
* @return Returns pointer to memory where to prepare layer 3 IPv4 or IPv6 packet for sending. If the function fails,
* the return value is NULL. To get extended error information, call GetLastError. Possible errors include the
* following:
* ERROR_HANDLE_EOF Wintun adapter is terminating;
* ERROR_BUFFER_OVERFLOW Wintun buffer is full;
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
_Post_writable_byte_size_(PacketSize)
BYTE *(WINAPI WINTUN_ALLOCATE_SEND_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _In_ DWORD PacketSize);
/**
* Sends the packet and releases internal buffer. WintunSendPacket is thread-safe, but the WintunAllocateSendPacket
* order of calls define the packet sending order. This means the packet is not guaranteed to be sent in the
* WintunSendPacket yet.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param Packet Packet obtained with WintunAllocateSendPacket
*/
typedef VOID(WINAPI WINTUN_SEND_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _In_ const BYTE *Packet);
#pragma warning(pop)
#ifdef __cplusplus
}
#endif

View File

@@ -4,13 +4,9 @@ import (
"fmt" "fmt"
"net" "net"
"strconv" "strconv"
"strings"
"sync" "sync"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
) )
// This whole thing should be rewritten to use context // This whole thing should be rewritten to use context
@@ -34,10 +30,11 @@ func newDnsRecords(hostMap *HostMap) *dnsRecords {
func (d *dnsRecords) Query(data string) string { func (d *dnsRecords) Query(data string) string {
d.RLock() d.RLock()
defer d.RUnlock() if r, ok := d.dnsMap[data]; ok {
if r, ok := d.dnsMap[strings.ToLower(data)]; ok { d.RUnlock()
return r return r
} }
d.RUnlock()
return "" return ""
} }
@@ -46,8 +43,8 @@ func (d *dnsRecords) QueryCert(data string) string {
if ip == nil { if ip == nil {
return "" return ""
} }
iip := iputil.Ip2VpnIp(ip) iip := ip2int(ip)
hostinfo, err := d.hostMap.QueryVpnIp(iip) hostinfo, err := d.hostMap.QueryVpnIP(iip)
if err != nil { if err != nil {
return "" return ""
} }
@@ -62,11 +59,11 @@ func (d *dnsRecords) QueryCert(data string) string {
func (d *dnsRecords) Add(host, data string) { func (d *dnsRecords) Add(host, data string) {
d.Lock() d.Lock()
defer d.Unlock() d.dnsMap[host] = data
d.dnsMap[strings.ToLower(host)] = data d.Unlock()
} }
func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) { func parseQuery(m *dns.Msg, w dns.ResponseWriter) {
for _, q := range m.Question { for _, q := range m.Question {
switch q.Qtype { switch q.Qtype {
case dns.TypeA: case dns.TypeA:
@@ -98,44 +95,37 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
} }
} }
func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) { func handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg) m := new(dns.Msg)
m.SetReply(r) m.SetReply(r)
m.Compress = false m.Compress = false
switch r.Opcode { switch r.Opcode {
case dns.OpcodeQuery: case dns.OpcodeQuery:
parseQuery(l, m, w) parseQuery(m, w)
} }
w.WriteMsg(m) w.WriteMsg(m)
} }
func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() { func dnsMain(hostMap *HostMap, c *Config) {
dnsR = newDnsRecords(hostMap) dnsR = newDnsRecords(hostMap)
// attach request handler func // attach request handler func
dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) { dns.HandleFunc(".", handleDnsRequest)
handleDnsRequest(l, w, r)
})
c.RegisterReloadCallback(func(c *config.C) { c.RegisterReloadCallback(reloadDns)
reloadDns(l, c) startDns(c)
})
return func() {
startDns(l, c)
}
} }
func getDnsServerAddr(c *config.C) string { func getDnsServerAddr(c *Config) string {
return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)) return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
} }
func startDns(l *logrus.Logger, c *config.C) { func startDns(c *Config) {
dnsAddr = getDnsServerAddr(c) dnsAddr = getDnsServerAddr(c)
dnsServer = &dns.Server{Addr: dnsAddr, Net: "udp"} dnsServer = &dns.Server{Addr: dnsAddr, Net: "udp"}
l.WithField("dnsListener", dnsAddr).Info("Starting DNS responder") l.Debugf("Starting DNS responder at %s\n", dnsAddr)
err := dnsServer.ListenAndServe() err := dnsServer.ListenAndServe()
defer dnsServer.Shutdown() defer dnsServer.Shutdown()
if err != nil { if err != nil {
@@ -143,7 +133,7 @@ func startDns(l *logrus.Logger, c *config.C) {
} }
} }
func reloadDns(l *logrus.Logger, c *config.C) { func reloadDns(c *Config) {
if dnsAddr == getDnsServerAddr(c) { if dnsAddr == getDnsServerAddr(c) {
l.Debug("No DNS server config change detected") l.Debug("No DNS server config change detected")
return return
@@ -151,5 +141,5 @@ func reloadDns(l *logrus.Logger, c *config.C) {
l.Debug("Restarting DNS server") l.Debug("Restarting DNS server")
dnsServer.Shutdown() dnsServer.Shutdown()
go startDns(l, c) go startDns(c)
} }
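
The left-hand (newer) code lower-cases hostnames on both Add and Query because DNS names are case-insensitive. A standalone sketch of that record store, with assumed names standing in for the dnsRecords type above:

```go
package dnsrecords

import (
	"strings"
	"sync"
)

// caseInsensitiveRecords is an illustrative stand-in for the dnsRecords type:
// keys are lower-cased once on write and once on read, so "HOST.example" and
// "host.example" resolve to the same entry.
type caseInsensitiveRecords struct {
	sync.RWMutex
	m map[string]string
}

func (r *caseInsensitiveRecords) Add(host, data string) {
	r.Lock()
	defer r.Unlock()
	r.m[strings.ToLower(host)] = data
}

func (r *caseInsensitiveRecords) Query(name string) string {
	r.RLock()
	defer r.RUnlock()
	return r.m[strings.ToLower(name)] // empty string when the record is absent
}
```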

View File

@@ -1,3 +0,0 @@
package e2e
// This file exists to allow `go fmt` to traverse here on its own. The build tags were keeping it out before

View File

@@ -1,870 +0,0 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"fmt"
"net"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
)
func BenchmarkHotPath(b *testing.B) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
r := router.NewR(b, myControl, theirControl)
r.CancelFlowLogs()
for n := 0; n < b.N; n++ {
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
_ = r.RouteForAllUntilTxTun(theirControl)
}
myControl.Stop()
theirControl.Stop()
}
func TestGoodHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
t.Log("Get their stage 1 packet so that we can play with it")
stage1Packet := theirControl.GetFromUDP(true)
t.Log("I consume a garbage packet with a proper nebula header for our tunnel")
// this should log a statement and get ignored, allowing the real handshake packet to complete the tunnel
badPacket := stage1Packet.Copy()
badPacket.Data = badPacket.Data[:len(badPacket.Data)-header.Len]
myControl.InjectUDPPacket(badPacket)
t.Log("Have me consume their real stage 1 packet. I have a tunnel now")
myControl.InjectUDPPacket(stage1Packet)
t.Log("Wait until we see my cached packet come through")
myControl.WaitForType(1, 0, theirControl)
t.Log("Make sure our host infos are correct")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
t.Log("Get that cached packet and make sure it looks right")
myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
t.Log("Do a bidirectional tunnel test")
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop()
theirControl.Stop()
//TODO: assert hostmaps
}
func TestWrongResponderHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
// The IPs here are chosen on purpose:
// The current remote handling will sort by preference, public, and then lexically.
// So we need them to have a higher address than evil (we could apply a preference though)
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
// Add their real udp addr, which should be tried after evil.
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl, evilControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
evilControl.Start()
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
h := &header.H{}
err := h.Parse(p.Data)
if err != nil {
panic(err)
}
if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 {
return router.RouteAndExit
}
return router.KeepRouting
})
//TODO: Assert pending hostmap - I should have a correct hostinfo for them now
t.Log("My cached packet should be received by them")
myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
t.Log("Test the tunnel with them")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Flush all packets from all controllers")
r.FlushAll()
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil")
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
//TODO: assert hostmaps for everyone
r.RenderHostmaps("Final hostmaps", myControl, theirControl, evilControl)
t.Log("Success!")
myControl.Stop()
theirControl.Stop()
}
func TestStage1Race(t *testing.T) {
// This test ensures that two hosts handshaking with each other at the same time will allow traffic to flow
// but will eventually collapse down to a single tunnel
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
t.Log("Trigger a handshake to start on both me and them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
t.Log("Get both stage 1 handshake packets")
myHsForThem := myControl.GetFromUDP(true)
theirHsForMe := theirControl.GetFromUDP(true)
r.Log("Now inject both stage 1 handshake packets")
r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
r.InjectUDPPacket(myControl, theirControl, myHsForThem)
r.Log("Route until they receive a message packet")
myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.Log("Their cached packet should be received by me")
theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
r.Log("Do a bidirectional tunnel test")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myHostmapHosts := myControl.ListHostmapHosts(false)
myHostmapIndexes := myControl.ListHostmapIndexes(false)
theirHostmapHosts := theirControl.ListHostmapHosts(false)
theirHostmapIndexes := theirControl.ListHostmapIndexes(false)
// We should have two tunnels on both sides: two indexes each, but only one host entry since both tunnels are to the same peer
assert.Len(t, myHostmapHosts, 1)
assert.Len(t, theirHostmapHosts, 1)
assert.Len(t, myHostmapIndexes, 2)
assert.Len(t, theirHostmapIndexes, 2)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Spin until connection manager tears down a tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
// We should only have a single tunnel now on both sides
assert.Len(t, myFinalHostmapHosts, 1)
assert.Len(t, theirFinalHostmapHosts, 1)
assert.Len(t, myFinalHostmapIndexes, 1)
assert.Len(t, theirFinalHostmapIndexes, 1)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop()
theirControl.Stop()
}
func TestUncleanShutdownRaceLoser(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
r.Log("Trigger a handshake from me to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.Log("Nuke my hostmap")
myHostmap := myControl.GetHostmap()
myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again"))
p = r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.Log("Assert the tunnel works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.Log("Wait for the dead index to go away")
start := len(theirControl.GetHostmap().Indexes)
for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
if len(theirControl.GetHostmap().Indexes) < start {
break
}
time.Sleep(time.Second)
}
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
}
func TestUncleanShutdownRaceWinner(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
r.Log("Trigger a handshake from me to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
r.Log("Nuke my hostmap")
theirHostmap := theirControl.GetHostmap()
theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again"))
p = r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
r.Log("Assert the tunnel works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.Log("Wait for the dead index to go away")
start := len(myControl.GetHostmap().Indexes)
for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
if len(myControl.GetHostmap().Indexes) < start {
break
}
time.Sleep(time.Second)
}
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
}
func TestRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach me how to get to the relay and that they can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
}
func TestStage1RaceRelays(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach me how to get to the relay and that they can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
r.Log("Get a tunnel between me and relay")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
r.Log("Get a tunnel between them and relay")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
r.Log("Trigger a handshake from both them and me via relay to them and me")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
r.Log("Wait for a packet from them to me")
p := r.RouteForAllUntilTxTun(myControl)
_ = p
myControl.Stop()
theirControl.Stop()
relayControl.Stop()
//
////TODO: assert hostmaps
}
func TestStage1RaceRelays2(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
l := NewTestLogger()
// Teach me how to get to the relay and that they can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
r.Log("Get a tunnel between me and relay")
l.Info("Get a tunnel between me and relay")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
r.Log("Get a tunnel between them and relay")
l.Info("Get a tunnel between them and relay")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
r.Log("Trigger a handshake from both them and me via relay to them and me")
l.Info("Trigger a handshake from both them and me via relay to them and me")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
r.Log("Wait for a packet from them to me")
l.Info("Wait for a packet from them to me; myControl")
r.RouteForAllUntilTxTun(myControl)
l.Info("Wait for a packet from them to me; theirControl")
r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
l.Info("Assert the tunnel works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
t.Log("Wait until we remove extra tunnels")
l.Info("Wait until we remove extra tunnels")
l.WithFields(
logrus.Fields{
"myControl": len(myControl.GetHostmap().Indexes),
"theirControl": len(theirControl.GetHostmap().Indexes),
"relayControl": len(relayControl.GetHostmap().Indexes),
}).Info("Waiting for hostinfos to be removed...")
hostInfos := len(myControl.GetHostmap().Indexes) + len(theirControl.GetHostmap().Indexes) + len(relayControl.GetHostmap().Indexes)
retries := 60
for hostInfos > 6 && retries > 0 {
hostInfos = len(myControl.GetHostmap().Indexes) + len(theirControl.GetHostmap().Indexes) + len(relayControl.GetHostmap().Indexes)
l.WithFields(
logrus.Fields{
"myControl": len(myControl.GetHostmap().Indexes),
"theirControl": len(theirControl.GetHostmap().Indexes),
"relayControl": len(relayControl.GetHostmap().Indexes),
}).Info("Waiting for hostinfos to be removed...")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
retries--
}
r.Log("Assert the tunnel works")
l.Info("Assert the tunnel works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
myControl.Stop()
theirControl.Stop()
relayControl.Stop()
//
////TODO: assert hostmaps
}
func TestRehandshakingRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach me how to get to the relay and that they can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
r.Log("Renew relay certificate and spin until me and them sees it")
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
panic(err)
}
relayConfig.Settings["pki"] = m{
"ca": string(caB),
"cert": string(myNextPEM),
"key": string(myNextPrivKey),
}
rc, err := yaml.Marshal(relayConfig.Settings)
assert.NoError(t, err)
relayConfig.ReloadConfigString(string(rc))
for {
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between my and relay is updated!")
break
}
time.Sleep(time.Second)
}
for {
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between their and relay is updated!")
break
}
time.Sleep(time.Second)
}
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// We should have two hostinfos on all sides
for len(myControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("myControl hostinfos got cleaned up!")
for len(theirControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("theirControl hostinfos got cleaned up!")
for len(relayControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("relayControl hostinfos got cleaned up!")
}
func TestRehandshaking(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
// Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
t.Log("Stand up a tunnel between me and them")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew my certificate and spin until their sees it")
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
panic(err)
}
myConfig.Settings["pki"] = m{
"ca": string(caB),
"cert": string(myNextPEM),
"key": string(myNextPrivKey),
}
rc, err := yaml.Marshal(myConfig.Settings)
assert.NoError(t, err)
myConfig.ReloadConfigString(string(rc))
for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
break
}
time.Sleep(time.Second)
}
// Flip their firewall to only allow the new group, to catch the tunnels reverting incorrectly
rc, err = yaml.Marshal(theirConfig.Settings)
assert.NoError(t, err)
var theirNewConfig m
assert.NoError(t, yaml.Unmarshal(rc, &theirNewConfig))
theirFirewall := theirNewConfig["firewall"].(map[interface{}]interface{})
theirFirewall["inbound"] = []m{{
"proto": "any",
"port": "any",
"group": "new group",
}}
rc, err = yaml.Marshal(theirNewConfig)
assert.NoError(t, err)
theirConfig.ReloadConfigString(string(rc))
r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
// Make sure the correct tunnel won
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
assert.Contains(t, c.Cert.Details.Groups, "new group")
// We should only have a single tunnel now on both sides
assert.Len(t, myFinalHostmapHosts, 1)
assert.Len(t, theirFinalHostmapHosts, 1)
assert.Len(t, myFinalHostmapIndexes, 1)
assert.Len(t, theirFinalHostmapIndexes, 1)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop()
theirControl.Stop()
}
func TestRehandshakingLoser(t *testing.T) {
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
// should be the one with the new certificate
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
// Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
t.Log("Stand up a tunnel between me and them")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
tt1 := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
tt2 := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
fmt.Println(tt1.LocalIndex, tt2.LocalIndex)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew their certificate and spin until mine sees it")
_, _, theirNextPrivKey, theirNextPEM := newTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
panic(err)
}
theirConfig.Settings["pki"] = m{
"ca": string(caB),
"cert": string(theirNextPEM),
"key": string(theirNextPrivKey),
}
rc, err := yaml.Marshal(theirConfig.Settings)
assert.NoError(t, err)
theirConfig.ReloadConfigString(string(rc))
for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
_, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"]
if theirNewGroup {
break
}
time.Sleep(time.Second)
}
// Flip my firewall to only allow the new group, to catch the tunnels reverting incorrectly
rc, err = yaml.Marshal(myConfig.Settings)
assert.NoError(t, err)
var myNewConfig m
assert.NoError(t, yaml.Unmarshal(rc, &myNewConfig))
theirFirewall := myNewConfig["firewall"].(map[interface{}]interface{})
theirFirewall["inbound"] = []m{{
"proto": "any",
"port": "any",
"group": "their new group",
}}
rc, err = yaml.Marshal(myNewConfig)
assert.NoError(t, err)
myConfig.ReloadConfigString(string(rc))
r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
// Make sure the correct tunnel won
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group")
// We should only have a single tunnel now on both sides
assert.Len(t, myFinalHostmapHosts, 1)
assert.Len(t, theirFinalHostmapHosts, 1)
assert.Len(t, myFinalHostmapIndexes, 1)
assert.Len(t, theirFinalHostmapIndexes, 1)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop()
theirControl.Stop()
}
func TestRaceRegression(t *testing.T) {
// This test forces stage 1, stage 2, stage 1 to be received by me from them
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
// caused a cross-linked hostinfo
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
//them rx stage:1 initiatorIndex=642843150 responderIndex=0
//me rx stage:1 initiatorIndex=120607833 responderIndex=0
//them rx stage:1 initiatorIndex=642843150 responderIndex=0
//me rx stage:2 initiatorIndex=642843150 responderIndex=3701775874
//me rx stage:1 initiatorIndex=120607833 responderIndex=0
//them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089
t.Log("Start both handshakes")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
t.Log("Get both stage 1")
myStage1ForThem := myControl.GetFromUDP(true)
theirStage1ForMe := theirControl.GetFromUDP(true)
t.Log("Inject them in a special way")
theirControl.InjectUDPPacket(myStage1ForThem)
myControl.InjectUDPPacket(theirStage1ForMe)
theirControl.InjectUDPPacket(myStage1ForThem)
//TODO: ensure stage 2
t.Log("Get both stage 2")
myStage2ForThem := myControl.GetFromUDP(true)
theirStage2ForMe := theirControl.GetFromUDP(true)
t.Log("Inject them in a special way again")
myControl.InjectUDPPacket(theirStage2ForMe)
myControl.InjectUDPPacket(theirStage1ForMe)
theirControl.InjectUDPPacket(myStage2ForThem)
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
t.Log("Flush the packets")
r.RouteForAllUntilTxTun(myControl)
r.RouteForAllUntilTxTun(theirControl)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
t.Log("Make sure the tunnel still works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myControl.Stop()
theirControl.Stop()
}
//TODO: test
// Race winner renews and handshakes
// Race loser renews and handshakes
// Does race winner repin the cert to old?
//TODO: add a test with many lies

View File

@@ -1,325 +0,0 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"crypto/rand"
"fmt"
"io"
"net"
"os"
"testing"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2"
)
type m map[string]interface{}
// newSimpleServer creates a nebula instance with many assumptions
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
l := NewTestLogger()
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
copy(vpnIpNet.IP, udpIp)
vpnIpNet.IP[1] += 128
udpAddr := net.UDPAddr{
IP: udpIp,
Port: 4242,
}
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
caB, err := caCrt.MarshalToPEM()
if err != nil {
panic(err)
}
mc := m{
"pki": m{
"ca": string(caB),
"cert": string(myPEM),
"key": string(myPrivKey),
},
//"tun": m{"disabled": true},
"firewall": m{
"outbound": []m{{
"proto": "any",
"port": "any",
"host": "any",
}},
"inbound": []m{{
"proto": "any",
"port": "any",
"host": "any",
}},
},
//"handshakes": m{
// "try_interval": "1s",
//},
"listen": m{
"host": udpAddr.IP.String(),
"port": udpAddr.Port,
},
"logging": m{
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
"level": l.Level.String(),
},
"timers": m{
"pending_deletion_interval": 2,
"connection_alive_interval": 2,
},
}
if overrides != nil {
err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice)
if err != nil {
panic(err)
}
mc = overrides
}
cb, err := yaml.Marshal(mc)
if err != nil {
panic(err)
}
c := config.NewC(l)
c.LoadString(string(cb))
control, err := nebula.Main(c, false, "e2e-test", l, nil)
if err != nil {
panic(err)
}
return control, vpnIpNet, &udpAddr, c
}
// newTestCaCert will generate a CA cert
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = ips
}
if len(subnets) > 0 {
nc.Details.Subnets = subnets
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(cert.Curve_CURVE25519, priv)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, priv, pem
}
// newTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
pub, rawPriv := x25519Keypair()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
Ips: []*net.IPNet{ip},
Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
},
}
err = nc.Sign(ca.Details.Curve, key)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err)
}
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
}
type doneCb func()
func deadline(t *testing.T, seconds time.Duration) doneCb {
timeout := time.After(seconds * time.Second)
done := make(chan bool)
go func() {
select {
case <-timeout:
t.Fatal("Test did not finish in time")
case <-done:
}
}()
return func() {
done <- true
}
}
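// A minimal usage sketch, not part of the original suite (the test name and body
// are hypothetical): deadline arms a timer that fails the test unless the
// returned doneCb is called within the given number of seconds.
func TestDeadlineUsageSketch(t *testing.T) {
	done := deadline(t, 5)
	defer done()
	// Stand-in for real work that should finish well before the deadline fires.
	time.Sleep(10 * time.Millisecond)
}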
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
// Send a packet from them to me
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
bPacket := r.RouteForAllUntilTxTun(controlA)
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
// And once more from me to them
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
aPacket := r.RouteForAllUntilTxTun(controlB)
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
}
func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
// Get both host infos
hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")
hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")
// Check that both vpn and real addr are correct
assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")
assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")
assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")
// Check that our indexes match
assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index")
//TODO: Would be nice to assert this memory
//checkIndexes := func(name string, hm *HostMap, hi *HostInfo) {
// hBbyIndex := hmA.Indexes[hBinA.localIndexId]
// assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name)
// assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name)
//
// //TODO: remote indexes are susceptible to collision
// hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId]
// assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name)
// assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point to the right host info", name)
//}
//
//// Check hostmap indexes too
//checkIndexes("hmA", hmA, hBinA)
//checkIndexes("hmB", hmB, hAinB)
}
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
assert.NotNil(t, v4, "No ipv4 data found")
assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
assert.NotNil(t, udp, "No udp data found")
assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect")
assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect")
data := packet.ApplicationLayer()
assert.NotNil(t, data)
assert.Equal(t, expected, data.Payload(), "Data was incorrect")
}
func NewTestLogger() *logrus.Logger {
l := logrus.New()
v := os.Getenv("TEST_LOGS")
if v == "" {
l.SetOutput(io.Discard)
l.SetLevel(logrus.PanicLevel)
return l
}
switch v {
case "2":
l.SetLevel(logrus.DebugLevel)
case "3":
l.SetLevel(logrus.TraceLevel)
default:
l.SetLevel(logrus.InfoLevel)
}
return l
}
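// A usage note (a sketch; the command lines assume the e2e_testing build tag
// declared at the top of this file): the TEST_LOGS environment variable controls
// how much nebula output the test logger emits. Unset, all output is discarded.
//
//	TEST_LOGS=1 go test -tags e2e_testing ./e2e   # info level
//	TEST_LOGS=2 go test -tags e2e_testing ./e2e   # debug level
//	TEST_LOGS=3 go test -tags e2e_testing ./e2e   # trace level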

View File

@@ -1,3 +0,0 @@
package router
// This file exists to allow `go fmt` to traverse here on its own. The build tags were keeping it out before

View File

@@ -1,145 +0,0 @@
//go:build e2e_testing
// +build e2e_testing
package router
import (
"fmt"
"sort"
"strings"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/iputil"
)
type edge struct {
from string
to string
dual bool
}
func renderHostmaps(controls ...*nebula.Control) string {
var lines []*edge
r := "graph TB\n"
for _, c := range controls {
sr, se := renderHostmap(c)
r += sr
for _, e := range se {
add := true
// Collapse duplicate edges into a bi-directionally connected edge
for _, ge := range lines {
if e.to == ge.from && e.from == ge.to {
add = false
ge.dual = true
break
}
}
if add {
lines = append(lines, e)
}
}
}
for _, line := range lines {
if line.dual {
r += fmt.Sprintf("\t%v <--> %v\n", line.from, line.to)
} else {
r += fmt.Sprintf("\t%v --> %v\n", line.from, line.to)
}
}
return r
}
func renderHostmap(c *nebula.Control) (string, []*edge) {
var lines []string
var globalLines []*edge
clusterName := strings.Trim(c.GetCert().Details.Name, " ")
clusterVpnIp := c.GetCert().Details.Ips[0].IP
r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp)
hm := c.GetHostmap()
hm.RLock()
defer hm.RUnlock()
// Draw the vpn to index nodes
r += fmt.Sprintf("\t\tsubgraph %s.hosts[\"Hosts (vpn ip to index)\"]\n", clusterName)
hosts := sortedHosts(hm.Hosts)
for _, vpnIp := range hosts {
hi := hm.Hosts[vpnIp]
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, vpnIp, vpnIp)
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, hi.GetLocalIndex()))
rs := hi.GetRelayState()
for _, relayIp := range rs.CopyRelayIps() {
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
}
for _, relayIp := range rs.CopyRelayForIdxs() {
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
}
}
r += "\t\tend\n"
// Draw the relay hostinfos
if len(hm.Relays) > 0 {
r += fmt.Sprintf("\t\tsubgraph %s.relays[\"Relays (relay index to hostinfo)\"]\n", clusterName)
for relayIndex, hi := range hm.Relays {
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, relayIndex, relayIndex)
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, relayIndex, clusterName, hi.GetLocalIndex()))
}
r += "\t\tend\n"
}
// Draw the local index to relay or remote index nodes
r += fmt.Sprintf("\t\tsubgraph indexes.%s[\"Indexes (index to hostinfo)\"]\n", clusterName)
indexes := sortedIndexes(hm.Indexes)
for _, idx := range indexes {
hi, ok := hm.Indexes[idx]
if ok {
r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
_ = hi
}
}
r += "\t\tend\n"
// Add the edges inside this host
for _, line := range lines {
r += fmt.Sprintf("\t\t%v\n", line)
}
r += "\tend\n"
return r, globalLines
}
func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
keys := make([]iputil.VpnIp, 0, len(hosts))
for key := range hosts {
keys = append(keys, key)
}
sort.SliceStable(keys, func(i, j int) bool {
return keys[i] > keys[j]
})
return keys
}
func sortedIndexes(indexes map[uint32]*nebula.HostInfo) []uint32 {
keys := make([]uint32, 0, len(indexes))
for key := range indexes {
keys = append(keys, key)
}
sort.SliceStable(keys, func(i, j int) bool {
return keys[i] > keys[j]
})
return keys
}
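// Illustrative only (host names, vpn ips and index numbers below are made up):
// for two hosts "me" and "them" with a single established tunnel, renderHostmaps
// produces mermaid source roughly like the following.
//
//	graph TB
//		subgraph me["me (10.128.0.1)"]
//			subgraph me.hosts["Hosts (vpn ip to index)"]
//				me.10.128.0.2["10.128.0.2"]
//			end
//			subgraph indexes.me["Indexes (index to hostinfo)"]
//				me.123["123 (10.128.0.2)"]
//			end
//			me.10.128.0.2 --> me.123
//		end
//		subgraph them["them (10.128.0.2)"]
//			...
//		end
//		me.123 <--> them.456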

View File

@@ -1,767 +0,0 @@
//go:build e2e_testing
// +build e2e_testing
package router
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
"golang.org/x/exp/maps"
)
type R struct {
// Simple map of the ip:port registered on a control to the control
// Basically a router, right?
controls map[string]*nebula.Control
// A map of external addresses to the control that should receive inbound packets
// sent to that address, even though the control doesn't listen there (simulating a NAT)
inNat map[string]*nebula.Control
// A last-used map: if an inbound packet hit the inNat map then
// all return packets should use the same last-used inbound address for the outbound sender
// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver
outNat map[string]net.UDPAddr
// A map of vpn ip to the nebula control it belongs to
vpnControls map[iputil.VpnIp]*nebula.Control
ignoreFlows []ignoreFlow
flow []flowEntry
// A set of additional mermaid graphs to draw in the flow log markdown file
// Currently consisting only of hostmap renders
additionalGraphs []mermaidGraph
// All interactions are locked to help serialize behavior
sync.Mutex
fn string
cancelRender context.CancelFunc
t testing.TB
}
type ignoreFlow struct {
tun NullBool
messageType header.MessageType
subType header.MessageSubType
//from
//to
}
type mermaidGraph struct {
title string
content string
}
type NullBool struct {
HasValue bool
IsTrue bool
}
type flowEntry struct {
note string
packet *packet
}
type packet struct {
from *nebula.Control
to *nebula.Control
packet *udp.Packet
tun bool // a packet pulled off a tun device
rx bool // the packet was received by a udp device
}
func (p *packet) WasReceived() {
if p != nil {
p.rx = true
}
}
type ExitType int
const (
// KeepRouting the function will get called again on the next packet
KeepRouting ExitType = 0
// ExitNow does not route this packet and exits immediately
ExitNow ExitType = 1
// RouteAndExit routes this packet and exits immediately afterwards
RouteAndExit ExitType = 2
)
type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType
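// A hedged usage sketch (not in the original file): an ExitFunc that keeps
// routing until an ordinary message packet (numeric type 1, the same value the
// e2e tests compare against) is about to be delivered, routes it, then stops.
//
//	r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) ExitType {
//		h := &header.H{}
//		if err := h.Parse(p.Data); err != nil {
//			panic(err)
//		}
//		if h.Type == 1 {
//			return RouteAndExit
//		}
//		return KeepRouting
//	})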
// NewR creates a new router to pass packets in a controlled fashion between the provided controllers.
// The packet flow will be recorded in a file within the mermaid directory under the same name as the test.
// Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made
func NewR(t testing.TB, controls ...*nebula.Control) *R {
ctx, cancel := context.WithCancel(context.Background())
if err := os.MkdirAll("mermaid", 0755); err != nil {
panic(err)
}
r := &R{
controls: make(map[string]*nebula.Control),
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
inNat: make(map[string]*nebula.Control),
outNat: make(map[string]net.UDPAddr),
flow: []flowEntry{},
ignoreFlows: []ignoreFlow{},
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
t: t,
cancelRender: cancel,
}
// Try to remove our render file
os.Remove(r.fn)
for _, c := range controls {
addr := c.GetUDPAddr()
if _, ok := r.controls[addr]; ok {
panic("Duplicate listen address: " + addr)
}
r.vpnControls[c.GetVpnIp()] = c
r.controls[addr] = c
}
// Spin the renderer in case we go nuts and the test never completes
go func() {
clockSource := time.NewTicker(time.Millisecond * 100)
defer clockSource.Stop()
for {
select {
case <-ctx.Done():
return
case <-clockSource.C:
r.renderHostmaps("clock tick")
r.renderFlow()
}
}
}()
return r
}
// AddRoute will place the nebula controller at the ip and port specified.
// It does not look at the addr attached to the instance.
// If a route is used, this will behave like a NAT for the return path,
// rewriting the source ip:port to whatever address the origin last sent to
func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) {
r.Lock()
defer r.Unlock()
inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port))
if _, ok := r.inNat[inAddr]; ok {
panic("Duplicate listen address inNat: " + inAddr)
}
r.inNat[inAddr] = c
}
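// A usage sketch (the addresses are hypothetical; not in the original file):
// expose theirControl behind 1.2.3.4:4242 so packets sent to that address are
// delivered to it, with return traffic rewritten to appear to come from
// 1.2.3.4:4242 as described above.
//
//	r := NewR(t, myControl, theirControl)
//	r.AddRoute(net.ParseIP("1.2.3.4"), 4242, theirControl)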
// RenderFlow renders the packet flow seen up until now and stops further automatic renders from happening.
func (r *R) RenderFlow() {
r.cancelRender()
r.renderFlow()
}
// CancelFlowLogs stops flow logs from being tracked and destroys any logs already collected
func (r *R) CancelFlowLogs() {
r.cancelRender()
r.flow = nil
}
func (r *R) renderFlow() {
if r.flow == nil {
return
}
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
if err != nil {
panic(err)
}
var participants = map[string]struct{}{}
var participantsVals []string
fmt.Fprintln(f, "```mermaid")
fmt.Fprintln(f, "sequenceDiagram")
// Assemble participants
for _, e := range r.flow {
if e.packet == nil {
continue
}
addr := e.packet.from.GetUDPAddr()
if _, ok := participants[addr]; ok {
continue
}
participants[addr] = struct{}{}
sanAddr := strings.Replace(addr, ":", "-", 1)
participantsVals = append(participantsVals, sanAddr)
fmt.Fprintf(
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
)
}
if len(participantsVals) > 2 {
// Get the first and last participantVals for notes
participantsVals = []string{participantsVals[0], participantsVals[len(participantsVals)-1]}
}
// Print packets
h := &header.H{}
for _, e := range r.flow {
if e.packet == nil {
//fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
continue
}
p := e.packet
if p.tun {
fmt.Fprintln(f, r.formatUdpPacket(p))
} else {
if err := h.Parse(p.packet.Data); err != nil {
panic(err)
}
line := "--x"
if p.rx {
line = "->>"
}
fmt.Fprintf(f,
" %s%s%s: %s(%s), index %v, counter: %v\n",
strings.Replace(p.from.GetUDPAddr(), ":", "-", 1),
line,
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
)
}
}
fmt.Fprintln(f, "```")
for _, g := range r.additionalGraphs {
fmt.Fprintf(f, "## %s\n", g.title)
fmt.Fprintln(f, "```mermaid")
fmt.Fprintln(f, g.content)
fmt.Fprintln(f, "```")
}
}
// IgnoreFlow tells the router to stop recording future flows that match the provided criteria.
// messageType and subType will target nebula underlay packets while tun will target nebula overlay packets
// NOTE: This is a very broad system; if you set tun to true then no more tun traffic will be rendered
func (r *R) IgnoreFlow(messageType header.MessageType, subType header.MessageSubType, tun NullBool) {
r.Lock()
defer r.Unlock()
r.ignoreFlows = append(r.ignoreFlows, ignoreFlow{
tun,
messageType,
subType,
})
}
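// A usage sketch (hypothetical; not in the original file): stop recording
// control/keepalive underlay packets in the flow log while leaving overlay (tun)
// traffic untouched. header.Control and header.MessageNone are the identifiers
// referenced elsewhere in the e2e tests.
//
//	r.IgnoreFlow(header.Control, header.MessageNone, NullBool{})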
func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
r.Lock()
defer r.Unlock()
s := renderHostmaps(controls...)
if len(r.additionalGraphs) > 0 {
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
if lastGraph.content == s && lastGraph.title == title {
// Ignore this rendering if it matches the last rendering added
// This is useful if you want to track rendering changes
return
}
}
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
title: title,
content: s,
})
}
func (r *R) renderHostmaps(title string) {
c := maps.Values(r.controls)
sort.SliceStable(c, func(i, j int) bool {
return c[i].GetVpnIp() > c[j].GetVpnIp()
})
s := renderHostmaps(c...)
if len(r.additionalGraphs) > 0 {
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
if lastGraph.content == s {
// Ignore this rendering if it matches the last rendering added
// This is useful if you want to track rendering changes
return
}
}
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
title: title,
content: s,
})
}
// InjectFlow can be used to record packet flow if the test is handling the routing on its own.
// The packet is assumed to have been received
func (r *R) InjectFlow(from, to *nebula.Control, p *udp.Packet) {
r.Lock()
defer r.Unlock()
r.unlockedInjectFlow(from, to, p, false)
}
func (r *R) Log(arg ...any) {
if r.flow == nil {
return
}
r.Lock()
r.flow = append(r.flow, flowEntry{note: fmt.Sprint(arg...)})
r.t.Log(arg...)
r.Unlock()
}
func (r *R) Logf(format string, arg ...any) {
if r.flow == nil {
return
}
r.Lock()
r.flow = append(r.flow, flowEntry{note: fmt.Sprintf(format, arg...)})
r.t.Logf(format, arg...)
r.Unlock()
}
// unlockedInjectFlow is used by the router to record that a packet has been transmitted. The packet is returned and
// should be marked as received AFTER it has been placed on the receiver's channel.
// If flow logs have been disabled this function will return nil
func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool) *packet {
if r.flow == nil {
return nil
}
r.renderHostmaps(fmt.Sprintf("Packet %v", len(r.flow)))
if len(r.ignoreFlows) > 0 {
var h header.H
err := h.Parse(p.Data)
if err != nil {
panic(err)
}
for _, i := range r.ignoreFlows {
if !tun {
if i.messageType == h.Type && i.subType == h.Subtype {
return nil
}
} else if i.tun.HasValue && i.tun.IsTrue {
return nil
}
}
}
fp := &packet{
from: from,
to: to,
packet: p.Copy(),
tun: tun,
}
r.flow = append(r.flow, flowEntry{packet: fp})
return fp
}
// OnceFrom will route a single packet from sender then return
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) OnceFrom(sender *nebula.Control) {
r.RouteExitFunc(sender, func(*udp.Packet, *nebula.Control) ExitType {
return RouteAndExit
})
}
// RouteUntilTxTun will route for sender and return when a packet is seen on the receiver's tun
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []byte {
tunTx := receiver.GetTunTxChan()
udpTx := sender.GetUDPTxChan()
for {
select {
// Maybe we already have something on the tun for us
case b := <-tunTx:
r.Lock()
np := udp.Packet{Data: make([]byte, len(b))}
copy(np.Data, b)
r.unlockedInjectFlow(receiver, receiver, &np, true)
r.Unlock()
return b
// Nope, lets push the sender along
case p := <-udpTx:
outAddr := sender.GetUDPAddr()
r.Lock()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
}
fp := r.unlockedInjectFlow(sender, c, p, false)
c.InjectUDPPacket(p)
fp.WasReceived()
r.Unlock()
}
}
}
// RouteForAllUntilTxTun will route for everyone and return when a packet is seen on the receiver's tun
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
sc := make([]reflect.SelectCase, len(r.controls)+1)
cm := make([]*nebula.Control, len(r.controls)+1)
i := 0
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(receiver.GetTunTxChan()),
Send: reflect.Value{},
}
cm[i] = receiver
i++
for _, c := range r.controls {
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c.GetUDPTxChan()),
Send: reflect.Value{},
}
cm[i] = c
i++
}
for {
x, rx, _ := reflect.Select(sc)
r.Lock()
if x == 0 {
// we are the tun tx, we can exit
p := rx.Interface().([]byte)
np := udp.Packet{Data: make([]byte, len(p))}
copy(np.Data, p)
r.unlockedInjectFlow(cm[x], cm[x], &np, true)
r.Unlock()
return p
} else {
// we are a udp tx, route and continue
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
}
fp := r.unlockedInjectFlow(cm[x], c, p, false)
c.InjectUDPPacket(p)
fp.WasReceived()
}
r.Unlock()
}
}
// RouteExitFunc will call the whatDo func with each udp packet from sender.
// whatDo can return:
// - ExitNow: the packet will not be routed and this call will return immediately
// - RouteAndExit: this call will return immediately after routing this packet
// - KeepRouting: the packet will be routed and whatDo will be called again on the next packet from sender
func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
h := &header.H{}
for {
p := sender.GetFromUDP(true)
r.Lock()
if err := h.Parse(p.Data); err != nil {
panic(err)
}
outAddr := sender.GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
}
e := whatDo(p, receiver)
switch e {
case ExitNow:
r.Unlock()
return
case RouteAndExit:
fp := r.unlockedInjectFlow(sender, receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
r.Unlock()
return
case KeepRouting:
fp := r.unlockedInjectFlow(sender, receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
default:
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
}
r.Unlock()
}
}
// RouteUntilAfterMsgType will route for sender until a message type is seen and sent from sender
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteUntilAfterMsgType(sender *nebula.Control, msgType header.MessageType, subType header.MessageSubType) {
h := &header.H{}
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
if err := h.Parse(p.Data); err != nil {
panic(err)
}
if h.Type == msgType && h.Subtype == subType {
return RouteAndExit
}
return KeepRouting
})
}
func (r *R) RouteForAllUntilAfterMsgTypeTo(receiver *nebula.Control, msgType header.MessageType, subType header.MessageSubType) {
h := &header.H{}
r.RouteForAllExitFunc(func(p *udp.Packet, r *nebula.Control) ExitType {
if r != receiver {
return KeepRouting
}
if err := h.Parse(p.Data); err != nil {
panic(err)
}
if h.Type == msgType && h.Subtype == subType {
return RouteAndExit
}
return KeepRouting
})
}
func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet) {
r.Lock()
defer r.Unlock()
fp := r.unlockedInjectFlow(sender, receiver, packet, false)
receiver.InjectUDPPacket(packet)
fp.WasReceived()
}
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
// finish can be any of the ExitType values except `KeepRouting`; the default value is `RouteAndExit`
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) {
if finish == KeepRouting {
finish = RouteAndExit
}
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) {
return finish
}
return KeepRouting
})
}
// RouteForAllExitFunc will route for every registered controller and call the whatDo func with each routed udp packet.
// whatDo can return:
// - ExitNow: the packet will not be routed and this call will return immediately
// - RouteAndExit: this call will return immediately after routing this packet
// - KeepRouting: the packet will be routed and whatDo will be called again on the next packet
func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
sc := make([]reflect.SelectCase, len(r.controls))
cm := make([]*nebula.Control, len(r.controls))
i := 0
for _, c := range r.controls {
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c.GetUDPTxChan()),
Send: reflect.Value{},
}
cm[i] = c
i++
}
for {
x, rx, _ := reflect.Select(sc)
r.Lock()
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
}
e := whatDo(p, receiver)
switch e {
case ExitNow:
r.Unlock()
return
case RouteAndExit:
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
r.Unlock()
return
case KeepRouting:
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
default:
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
}
r.Unlock()
}
}
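// Sketch of a caller-supplied ExitFunc, assuming a test that wants to stop after routing a bounded
// number of packets across all controllers (the limit of 10 and the counter name are illustrative):
//
//	routed := 0
//	r.RouteForAllExitFunc(func(p *udp.Packet, receiver *nebula.Control) ExitType {
//		routed++
//		if routed >= 10 {
//			return RouteAndExit // route this final packet, then return to the caller
//		}
//		return KeepRouting
//	})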
// FlushAll will route for every registered controller, exiting once there are no packets left to route
func (r *R) FlushAll() {
sc := make([]reflect.SelectCase, len(r.controls))
cm := make([]*nebula.Control, len(r.controls))
i := 0
for _, c := range r.controls {
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c.GetUDPTxChan()),
Send: reflect.Value{},
}
cm[i] = c
i++
}
// Add a default case to exit when nothing is left to send
sc = append(sc, reflect.SelectCase{
Dir: reflect.SelectDefault,
Chan: reflect.Value{},
Send: reflect.Value{},
})
for {
x, rx, ok := reflect.Select(sc)
if !ok {
return
}
r.Lock()
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
}
r.Unlock()
}
}
// getControl performs or seeds NAT translation and returns the control for toAddr; the From fields of p may be rewritten
// This is an internal router function, the caller must hold the lock
func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control {
if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok {
p.FromIp = newAddr.IP
p.FromPort = uint16(newAddr.Port)
}
c, ok := r.inNat[toAddr]
if ok {
sHost, sPort, err := net.SplitHostPort(toAddr)
if err != nil {
panic(err)
}
port, err := strconv.Atoi(sPort)
if err != nil {
panic(err)
}
r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
IP: net.ParseIP(sHost),
Port: port,
}
return c
}
return r.controls[toAddr]
}
func (r *R) formatUdpPacket(p *packet) string {
packet := gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
if v4 == nil {
panic("not an ipv4 packet")
}
from := "unknown"
if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok {
from = c.GetUDPAddr()
}
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
if udp == nil {
panic("not a udp packet")
}
data := packet.ApplicationLayer()
return fmt.Sprintf(
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
strings.Replace(from, ":", "-", 1),
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
udp.SrcPort,
udp.DstPort,
string(data.Payload()),
)
}
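// For reference, formatUdpPacket emits mermaid sequence-diagram lines shaped roughly like the
// following (addresses and payload are illustrative):
//
//	  192.168.0.1-4242-->>192.168.0.2-4242: src port: 80<br/>dest port: 80<br/>data: "hello"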
View File
@@ -7,11 +7,9 @@ pki:
ca: /etc/nebula/ca.crt ca: /etc/nebula/ca.crt
cert: /etc/nebula/host.crt cert: /etc/nebula/host.crt
key: /etc/nebula/host.key key: /etc/nebula/host.key
# blocklist is a list of certificate fingerprints that we will refuse to talk to #blocklist is a list of certificate fingerprints that we will refuse to talk to
#blocklist: #blocklist:
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72 # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
#disconnect_invalid: false
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network). # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel. # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -47,9 +45,8 @@ lighthouse:
# allowed. You can provide CIDRs here with `true` to allow and `false` to # allowed. You can provide CIDRs here with `true` to allow and `false` to
# deny. The most specific CIDR rule applies to each remote. If all rules are # deny. The most specific CIDR rule applies to each remote. If all rules are
# "allow", the default will be "deny", and vice-versa. If both "allow" and # "allow", the default will be "deny", and vice-versa. If both "allow" and
# "deny" IPv4 rules are present, then you MUST set a rule for "0.0.0.0/0" as # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
# the default. Similarly if both "allow" and "deny" IPv6 rules are present, # default.
# then you MUST set a rule for "::/0" as the default.
#remote_allow_list: #remote_allow_list:
# Example to block IPs from this subnet from being used for remote IPs. # Example to block IPs from this subnet from being used for remote IPs.
#"172.16.0.0/12": false #"172.16.0.0/12": false
@@ -59,14 +56,6 @@ lighthouse:
#"10.0.0.0/8": false #"10.0.0.0/8": false
#"10.42.42.0/24": true #"10.42.42.0/24": true
# EXPERIMENTAL: This option may change or disappear in the future.
# Optionally allows the definition of remote_allow_list blocks
# specific to an inside VPN IP CIDR.
#remote_allow_ranges:
# This rule would only allow only private IPs for this VPN range
#"10.42.42.0/24":
#"192.168.0.0/16": true
# local_allow_list allows you to filter which local IP addresses we advertise # local_allow_list allows you to filter which local IP addresses we advertise
# to the lighthouses. This uses the same logic as `remote_allow_list`, but # to the lighthouses. This uses the same logic as `remote_allow_list`, but
# additionally, you can specify an `interfaces` map of regular expressions # additionally, you can specify an `interfaces` map of regular expressions
@@ -82,32 +71,9 @@ lighthouse:
# Example to only advertise this subnet to the lighthouse. # Example to only advertise this subnet to the lighthouse.
#"10.0.0.0/8": true #"10.0.0.0/8": true
# advertise_addrs are routable addresses that will be included along with discovered addresses to report to the
# lighthouse, the format is "ip:port". `port` can be `0`, in which case the actual listening port will be used in its
# place, useful if `listen.port` is set to 0.
# This option is mainly useful when there are static ip addresses the host can be reached at that nebula can not
# typically discover on its own. Examples being port forwarding or multiple paths to the internet.
#advertise_addrs:
#- "1.1.1.1:4242"
#- "1.2.3.4:0" # port will be replaced with the real listening port
# EXPERIMENTAL: This option may change or disappear in the future.
# This setting allows us to "guess" what the remote might be for a host
# while we wait for the lighthouse response.
#calculated_remotes:
# For any Nebula IPs in 10.0.10.0/24, this will apply the mask and add
# the calculated IP as an initial remote (while we wait for the response
# from the lighthouse). Both CIDRs must have the same mask size.
# For example, Nebula IP 10.0.10.123 will have a calculated remote of
# 192.168.1.123
#10.0.10.0/24:
#- mask: 192.168.1.0/24
# port: 4242
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined, # Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
# however using port 0 will dynamically assign a port and is recommended for roaming nodes. # however using port 0 will dynamically assign a port and is recommended for roaming nodes.
listen: listen:
# To listen on both any ipv4 and ipv6 use "::"
host: 0.0.0.0 host: 0.0.0.0
port: 4242 port: 4242
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg) # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
@@ -119,19 +85,6 @@ listen:
# max, net.core.rmem_max and net.core.wmem_max # max, net.core.rmem_max and net.core.wmem_max
#read_buffer: 10485760 #read_buffer: 10485760
#write_buffer: 10485760 #write_buffer: 10485760
# By default, Nebula replies to packets it has no tunnel for with a "recv_error" packet. This packet helps speed up reconnection
# in the case that Nebula on either side did not shut down cleanly. This response can be abused as a way to discover if Nebula is running
# on a host though. This option lets you configure if you want to send "recv_error" packets always, never, or only to private network remotes.
# valid values: always, never, private
# This setting is reloadable.
#send_recv_error: always
# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
# UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
# device and SO_REUSEPORT on the UDP socket to allow multiple queues.
# This option is only supported on Linux.
#routines: 1
punchy: punchy:
# Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
@@ -142,21 +95,16 @@ punchy:
# Default is false # Default is false
#respond: true #respond: true
# delays a punch response for misbehaving NATs, default is 1 second. # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
#delay: 1s #delay: 1s
# set the delay before attempting punchy.respond. Default is 5 seconds. respond must be true to take effect. # Cipher allows you to choose between the available ciphers for your network.
#respond_delay: 5s
# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously! # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
#cipher: aes #cipher: chachapoly
# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest # Local range is used to define a hint about the local network range, which speeds up discovering the fastest
# path to a network adjacent nebula node. # path to a network adjacent nebula node.
# NOTE: the previous option "local_range" only allowed definition of a single range #local_range: "172.16.0.0/24"
# and has been deprecated for "preferred_ranges"
#preferred_ranges: ["172.16.0.0/24"]
# sshd can expose informational and administrative functions via ssh this is a # sshd can expose informational and administrative functions via ssh this is a
#sshd: #sshd:
@@ -174,27 +122,11 @@ punchy:
#keys: #keys:
#- "ssh public key string" #- "ssh public key string"
# EXPERIMENTAL: relay support for networks that can't establish direct connections.
relay:
# Relays are a list of Nebula IP's that peers can use to relay packets to me.
# IPs in this list must have am_relay set to true in their configs, otherwise
# they will reject relay requests.
#relays:
#- 192.168.100.1
#- <other Nebula VPN IPs of hosts used as relays to access me>
# Set am_relay to true to permit other hosts to list my IP in their relays config. Default false.
am_relay: false
# Set use_relays to false to prevent this instance from attempting to establish connections through relays.
# default true
use_relays: true
# Configure the private interface. Note: addr is baked into the nebula certificate # Configure the private interface. Note: addr is baked into the nebula certificate
tun: tun:
# When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root) # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
disabled: false disabled: false
# Name of the device. If not set, a default will be chosen by the OS. # Name of the device
# For macOS: if set, must be in the form `utun[0-9]+`.
# For FreeBSD: Required to be set, must be in the form `tun[0-9]+`.
dev: nebula1 dev: nebula1
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
drop_local_broadcast: false drop_local_broadcast: false
@@ -204,28 +136,18 @@ tun:
tx_queue: 500 tx_queue: 500
# Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
mtu: 1300 mtu: 1300
# Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
routes: routes:
#- mtu: 8800 #- mtu: 8800
# route: 10.0.0.0/16 # route: 10.0.0.0/16
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
# `mtu`: will default to tun mtu if this option is not specified
# `metric`: will default to 0 if this option is not specified
# `install`: will default to true, controls whether this route is installed in the systems routing table.
unsafe_routes: unsafe_routes:
#- route: 172.16.1.0/24 #- route: 172.16.1.0/24
# via: 192.168.100.99 # via: 192.168.100.99
# mtu: 1300 # mtu: 1300 #mtu will default to tun mtu if this option is not sepcified
# metric: 100
# install: true
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
# in nebula configuration files. Default false, not reloadable.
#use_system_route_table: false
# TODO # TODO
# Configure logging level # Configure logging level
@@ -268,33 +190,25 @@ logging:
# e.g.: `lighthouse.rx.HostQuery` # e.g.: `lighthouse.rx.HostQuery`
#lighthouse_metrics: false #lighthouse_metrics: false
# Handshake Manager Settings # Handshake Manger Settings
#handshakes: #handshakes:
# Handshakes are sent to all known addresses at each interval with a linear backoff, # Total time to try a handshake = sequence of `try_interval * retries`
# Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout # With 100ms interval and 20 retries it is 23.5 seconds
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
#try_interval: 100ms #try_interval: 100ms
#retries: 20 #retries: 20
# wait_rotation is the number of handshake attempts to do before starting to try non-local IP addresses
#wait_rotation: 5
# trigger_buffer is the size of the buffer channel for quickly sending handshakes # trigger_buffer is the size of the buffer channel for quickly sending handshakes
# after receiving the response for lighthouse queries # after receiving the response for lighthouse queries
#trigger_buffer: 64 #trigger_buffer: 64
# Nebula security group configuration # Nebula security group configuration
firewall: firewall:
# Action to take when a packet is not allowed by the firewall rules.
# Can be one of:
# `drop` (default): silently drop the packet.
# `reject`: send a reject reply.
# - For TCP, this will be a RST "Connection Reset" packet.
# - For other protocols, this will be an ICMP port unreachable packet.
outbound_action: drop
inbound_action: drop
conntrack: conntrack:
tcp_timeout: 12m tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
default_timeout: 10m default_timeout: 10m
max_connections: 100000
# The firewall is default deny. There is no way to write a deny rule. # The firewall is default deny. There is no way to write a deny rule.
# Rules are comprised of a protocol, port, and one or more of host, group, or CIDR # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
@@ -305,8 +219,7 @@ firewall:
# host: `any` or a literal hostname, ie `test-host` # host: `any` or a literal hostname, ie `test-host`
# group: `any` or a literal group name, ie `default-group` # group: `any` or a literal group name, ie `default-group`
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
# cidr: a remote CIDR, `0.0.0.0/0` is any. # cidr: a CIDR, `0.0.0.0/0` is any.
# local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
# ca_name: An issuing CA name # ca_name: An issuing CA name
# ca_sha: An issuing CA shasum # ca_sha: An issuing CA shasum
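As a rough sketch of how the rule fields above land in code: a hypothetical inbound rule of `proto: tcp, port: 443, group: web` reduces to a single call on the FirewallInterface shown further down in this diff (the newer signature that also carries a local_cidr argument). The variable name fw is illustrative:

	// fw satisfies FirewallInterface; nil ip/localIp mean the rule set no cidr/local_cidr.
	if err := fw.AddRule(true, firewall.ProtoTCP, 443, 443, []string{"web"}, "", nil, nil, "", ""); err != nil {
		// surfaced by the config loader as a bad rule
	}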
View File
@@ -119,6 +119,7 @@ firewall:
tcp_timeout: 12m tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
default_timeout: 10m default_timeout: 10m
max_connections: 100,000
inbound: inbound:
- proto: icmp - proto: icmp
View File
@@ -1,11 +1,12 @@
[Unit] [Unit]
Description=Nebula overlay networking tool Description=nebula
Wants=basic.target network-online.target nss-lookup.target time-sync.target Wants=basic.target
After=basic.target network.target network-online.target After=basic.target network.target
Before=sshd.service
[Service] [Service]
SyslogIdentifier=nebula SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always Restart=always
View File
@@ -65,6 +65,7 @@ firewall:
tcp_timeout: 12m tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
default_timeout: 10m default_timeout: 10m
max_connections: 100,000
inbound: inbound:
- proto: icmp - proto: icmp
View File
@@ -1,34 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>KeepAlive</key>
<true/>
<key>Label</key>
<string>net.defined.nebula</string>
<key>WorkingDirectory</key>
<string>/Users/{username}/.local/bin/nebula</string>
<key>LimitLoadToSessionType</key>
<array>
<string>Aqua</string>
<string>Background</string>
<string>LoginWindow</string>
<string>StandardIO</string>
<string>System</string>
</array>
<key>ProgramArguments</key>
<array>
<string>./nebula</string>
<string>-config</string>
<string>./config.yml</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>StandardErrorPath</key>
<string>./nebula.log</string>
<key>StandardOutPath</key>
<string>./nebula.log</string>
<key>UserName</key>
<string>root</string>
</dict>
</plist>
View File
@@ -1,11 +1,12 @@
[Unit] [Unit]
Description=Nebula overlay networking tool Description=nebula
Wants=basic.target network-online.target nss-lookup.target time-sync.target Wants=basic.target
After=basic.target network.target network-online.target After=basic.target network.target
Before=sshd.service
[Service] [Service]
SyslogIdentifier=nebula SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always Restart=always
View File
@@ -4,6 +4,7 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"net" "net"
@@ -16,16 +17,23 @@ import (
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cidr" )
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall" const (
fwProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
fwProtoTCP = 6
fwProtoUDP = 17
fwProtoICMP = 1
fwPortAny = 0 // Special value for matching `port: any`
fwPortFragment = -1 // Special value for matching `port: fragment`
) )
const tcpACK = 0x10 const tcpACK = 0x10
const tcpFIN = 0x01 const tcpFIN = 0x01
type FirewallInterface interface { type FirewallInterface interface {
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error
} }
type conn struct { type conn struct {
@@ -47,9 +55,6 @@ type Firewall struct {
InRules *FirewallTable InRules *FirewallTable
OutRules *FirewallTable OutRules *FirewallTable
InSendReject bool
OutSendReject bool
//TODO: we should have many more options for TCP, an option for ICMP, and mimic the kernel a bit better //TODO: we should have many more options for TCP, an option for ICMP, and mimic the kernel a bit better
// https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt // https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
TCPTimeout time.Duration //linux: 5 days max TCPTimeout time.Duration //linux: 5 days max
@@ -57,30 +62,20 @@ type Firewall struct {
DefaultTimeout time.Duration //linux: 600s DefaultTimeout time.Duration //linux: 600s
// Used to ensure we don't emit local packets for ips we don't own // Used to ensure we don't emit local packets for ips we don't own
localIps *cidr.Tree4 localIps *CIDRTree
rules string rules string
rulesVersion uint16 rulesVersion uint16
trackTCPRTT bool trackTCPRTT bool
metricTCPRTT metrics.Histogram metricTCPRTT metrics.Histogram
incomingMetrics firewallMetrics
outgoingMetrics firewallMetrics
l *logrus.Logger
}
type firewallMetrics struct {
droppedLocalIP metrics.Counter
droppedRemoteIP metrics.Counter
droppedNoRule metrics.Counter
} }
type FirewallConntrack struct { type FirewallConntrack struct {
sync.Mutex sync.Mutex
Conns map[firewall.Packet]*conn Conns map[FirewallPacket]*conn
TimerWheel *TimerWheel[firewall.Packet] TimerWheel *TimerWheel
} }
type FirewallTable struct { type FirewallTable struct {
@@ -106,20 +101,61 @@ type FirewallCA struct {
} }
type FirewallRule struct { type FirewallRule struct {
// Any makes Hosts, Groups, CIDR and LocalCIDR irrelevant // Any makes Hosts, Groups, and CIDR irrelevant
Any bool Any bool
Hosts map[string]struct{} Hosts map[string]struct{}
Groups [][]string Groups [][]string
CIDR *cidr.Tree4 CIDR *CIDRTree
LocalCIDR *cidr.Tree4
} }
// Even though ports are uint16, int32 maps are faster for lookup // Even though ports are uint16, int32 maps are faster for lookup
// Plus we can use `-1` for fragment rules // Plus we can use `-1` for fragment rules
type firewallPort map[int32]*FirewallCA type firewallPort map[int32]*FirewallCA
type FirewallPacket struct {
LocalIP uint32
RemoteIP uint32
LocalPort uint16
RemotePort uint16
Protocol uint8
Fragment bool
}
func (fp *FirewallPacket) Copy() *FirewallPacket {
return &FirewallPacket{
LocalIP: fp.LocalIP,
RemoteIP: fp.RemoteIP,
LocalPort: fp.LocalPort,
RemotePort: fp.RemotePort,
Protocol: fp.Protocol,
Fragment: fp.Fragment,
}
}
func (fp FirewallPacket) MarshalJSON() ([]byte, error) {
var proto string
switch fp.Protocol {
case fwProtoTCP:
proto = "tcp"
case fwProtoICMP:
proto = "icmp"
case fwProtoUDP:
proto = "udp"
default:
proto = fmt.Sprintf("unknown %v", fp.Protocol)
}
return json.Marshal(m{
"LocalIP": int2ip(fp.LocalIP).String(),
"RemoteIP": int2ip(fp.RemoteIP).String(),
"LocalPort": fp.LocalPort,
"RemotePort": fp.RemotePort,
"Protocol": proto,
"Fragment": fp.Fragment,
})
}
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts. // NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall { func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
//TODO: error on 0 duration //TODO: error on 0 duration
var min, max time.Duration var min, max time.Duration
@@ -137,7 +173,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
max = defaultTimeout max = defaultTimeout
} }
localIps := cidr.NewTree4() localIps := NewCIDRTree()
for _, ip := range c.Details.Ips { for _, ip := range c.Details.Ips {
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{}) localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
} }
@@ -148,8 +184,8 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
return &Firewall{ return &Firewall{
Conntrack: &FirewallConntrack{ Conntrack: &FirewallConntrack{
Conns: make(map[firewall.Packet]*conn), Conns: make(map[FirewallPacket]*conn),
TimerWheel: NewTimerWheel[firewall.Packet](min, max), TimerWheel: NewTimerWheel(min, max),
}, },
InRules: newFirewallTable(), InRules: newFirewallTable(),
OutRules: newFirewallTable(), OutRules: newFirewallTable(),
@@ -157,25 +193,12 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
UDPTimeout: UDPTimeout, UDPTimeout: UDPTimeout,
DefaultTimeout: defaultTimeout, DefaultTimeout: defaultTimeout,
localIps: localIps, localIps: localIps,
l: l,
metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)), metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
incomingMetrics: firewallMetrics{
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
droppedNoRule: metrics.GetOrRegisterCounter("firewall.incoming.dropped.no_rule", nil),
},
outgoingMetrics: firewallMetrics{
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.local_ip", nil),
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.remote_ip", nil),
droppedNoRule: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.no_rule", nil),
},
} }
} }
func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *config.C) (*Firewall, error) { func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, error) {
fw := NewFirewall( fw := NewFirewall(
l,
c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12), c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12),
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3), c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3),
c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10), c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10),
@@ -183,34 +206,12 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
//TODO: max_connections //TODO: max_connections
) )
inboundAction := c.GetString("firewall.inbound_action", "drop") err := AddFirewallRulesFromConfig(false, c, fw)
switch inboundAction {
case "reject":
fw.InSendReject = true
case "drop":
fw.InSendReject = false
default:
l.WithField("action", inboundAction).Warn("invalid firewall.inbound_action, defaulting to `drop`")
fw.InSendReject = false
}
outboundAction := c.GetString("firewall.outbound_action", "drop")
switch outboundAction {
case "reject":
fw.OutSendReject = true
case "drop":
fw.OutSendReject = false
default:
l.WithField("action", inboundAction).Warn("invalid firewall.outbound_action, defaulting to `drop`")
fw.OutSendReject = false
}
err := AddFirewallRulesFromConfig(l, false, c, fw)
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = AddFirewallRulesFromConfig(l, true, c, fw) err = AddFirewallRulesFromConfig(true, c, fw)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -219,22 +220,18 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
} }
// AddRule properly creates the in memory rule structure for a firewall table. // AddRule properly creates the in memory rule structure for a firewall table.
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error { func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS // Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
// https://github.com/golang/go/issues/14131 // https://github.com/golang/go/issues/14131
sIp := "" sIp := ""
if ip != nil { if ip != nil {
sIp = ip.String() sIp = ip.String()
} }
lIp := ""
if localIp != nil {
lIp = localIp.String()
}
// We need this rule string because we generate a hash. Removing this will break firewall reload. // We need this rule string because we generate a hash. Removing this will break firewall reload.
ruleString := fmt.Sprintf( ruleString := fmt.Sprintf(
"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, localIp: %v, caName: %v, caSha: %s", "incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s",
incoming, proto, startPort, endPort, groups, host, sIp, lIp, caName, caSha, incoming, proto, startPort, endPort, groups, host, sIp, caName, caSha,
) )
f.rules += ruleString + "\n" f.rules += ruleString + "\n"
@@ -242,7 +239,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
if !incoming { if !incoming {
direction = "outgoing" direction = "outgoing"
} }
f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "localIp": lIp, "caName": caName, "caSha": caSha}). l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "caName": caName, "caSha": caSha}).
Info("Firewall rule added") Info("Firewall rule added")
var ( var (
@@ -257,19 +254,19 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
} }
switch proto { switch proto {
case firewall.ProtoTCP: case fwProtoTCP:
fp = ft.TCP fp = ft.TCP
case firewall.ProtoUDP: case fwProtoUDP:
fp = ft.UDP fp = ft.UDP
case firewall.ProtoICMP: case fwProtoICMP:
fp = ft.ICMP fp = ft.ICMP
case firewall.ProtoAny: case fwProtoAny:
fp = ft.AnyProto fp = ft.AnyProto
default: default:
return fmt.Errorf("unknown protocol %v", proto) return fmt.Errorf("unknown protocol %v", proto)
} }
return fp.addRule(startPort, endPort, groups, host, ip, localIp, caName, caSha) return fp.addRule(startPort, endPort, groups, host, ip, caName, caSha)
} }
// GetRuleHash returns a hash representation of all inbound and outbound rules // GetRuleHash returns a hash representation of all inbound and outbound rules
@@ -278,7 +275,7 @@ func (f *Firewall) GetRuleHash() string {
return hex.EncodeToString(sum[:]) return hex.EncodeToString(sum[:])
} }
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error { func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterface) error {
var table string var table string
if inbound { if inbound {
table = "firewall.inbound" table = "firewall.inbound"
@@ -286,7 +283,7 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
table = "firewall.outbound" table = "firewall.outbound"
} }
r := c.Get(table) r := config.Get(table)
if r == nil { if r == nil {
return nil return nil
} }
@@ -298,7 +295,7 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
for i, t := range rs { for i, t := range rs {
var groups []string var groups []string
r, err := convertRule(l, t, table, i) r, err := convertRule(t, table, i)
if err != nil { if err != nil {
return fmt.Errorf("%s rule #%v; %s", table, i, err) return fmt.Errorf("%s rule #%v; %s", table, i, err)
} }
@@ -307,8 +304,8 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i) return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i)
} }
if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.LocalCidr == "" && r.CAName == "" && r.CASha == "" { if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.CAName == "" && r.CASha == "" {
return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided", table, i) return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, ca_name, or ca_sha must be provided", table, i)
} }
if len(r.Groups) > 0 { if len(r.Groups) > 0 {
@@ -341,13 +338,13 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
var proto uint8 var proto uint8
switch r.Proto { switch r.Proto {
case "any": case "any":
proto = firewall.ProtoAny proto = fwProtoAny
case "tcp": case "tcp":
proto = firewall.ProtoTCP proto = fwProtoTCP
case "udp": case "udp":
proto = firewall.ProtoUDP proto = fwProtoUDP
case "icmp": case "icmp":
proto = firewall.ProtoICMP proto = fwProtoICMP
default: default:
return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto) return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
} }
@@ -360,15 +357,7 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
} }
} }
var localCidr *net.IPNet err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, r.CAName, r.CASha)
if r.LocalCidr != "" {
_, localCidr, err = net.ParseCIDR(r.LocalCidr)
if err != nil {
return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err)
}
}
err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, localCidr, r.CAName, r.CASha)
if err != nil { if err != nil {
return fmt.Errorf("%s rule #%v; `%s`", table, i, err) return fmt.Errorf("%s rule #%v; `%s`", table, i, err)
} }
@@ -383,29 +372,26 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
// Drop returns an error if the packet should be dropped, explaining why. It // Drop returns an error if the packet should be dropped, explaining why. It
// returns nil if the packet should not be dropped. // returns nil if the packet should not be dropped.
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error { func (f *Firewall) Drop(packet []byte, fp FirewallPacket, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool) error {
// Check if we spoke to this tuple, if we did then allow this packet // Check if we spoke to this tuple, if we did then allow this packet
if f.inConns(packet, fp, incoming, h, caPool, localCache) { if f.inConns(packet, fp, incoming, h, caPool) {
return nil return nil
} }
// Make sure remote address matches nebula certificate // Make sure remote address matches nebula certificate
if remoteCidr := h.remoteCidr; remoteCidr != nil { if remoteCidr := h.remoteCidr; remoteCidr != nil {
if remoteCidr.Contains(fp.RemoteIP) == nil { if remoteCidr.Contains(fp.RemoteIP) == nil {
f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP return ErrInvalidRemoteIP
} }
} else { } else {
// Simple case: Certificate has one IP and no subnets // Simple case: Certificate has one IP and no subnets
if fp.RemoteIP != h.vpnIp { if fp.RemoteIP != h.hostId {
f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP return ErrInvalidRemoteIP
} }
} }
// Make sure we are supposed to be handling this local ip address // Make sure we are supposed to be handling this local ip address
if f.localIps.Contains(fp.LocalIP) == nil { if f.localIps.Contains(fp.LocalIP) == nil {
f.metrics(incoming).droppedLocalIP.Inc(1)
return ErrInvalidLocalIP return ErrInvalidLocalIP
} }
@@ -416,7 +402,6 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
// We now know which firewall table to check against // We now know which firewall table to check against
if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) { if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) {
f.metrics(incoming).droppedNoRule.Inc(1)
return ErrNoMatchingRule return ErrNoMatchingRule
} }
@@ -426,14 +411,6 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
return nil return nil
} }
func (f *Firewall) metrics(incoming bool) firewallMetrics {
if incoming {
return f.incomingMetrics
} else {
return f.outgoingMetrics
}
}
// Destroy cleans up any known cyclical references so the object can be free'd my GC. This should be called if a new // Destroy cleans up any known cyclical references so the object can be free'd my GC. This should be called if a new
// firewall object is created // firewall object is created
func (f *Firewall) Destroy() { func (f *Firewall) Destroy() {
@@ -449,12 +426,7 @@ func (f *Firewall) EmitStats() {
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion)) metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
} }
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool { func (f *Firewall) inConns(packet []byte, fp FirewallPacket, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool) bool {
if localCache != nil {
if _, ok := localCache[fp]; ok {
return true
}
}
conntrack := f.Conntrack conntrack := f.Conntrack
conntrack.Lock() conntrack.Lock()
@@ -481,8 +453,8 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
// We now know which firewall table to check against // We now know which firewall table to check against
if !table.match(fp, c.incoming, h.ConnectionState.peerCert, caPool) { if !table.match(fp, c.incoming, h.ConnectionState.peerCert, caPool) {
if f.l.Level >= logrus.DebugLevel { if l.Level >= logrus.DebugLevel {
h.logger(f.l). h.logger().
WithField("fwPacket", fp). WithField("fwPacket", fp).
WithField("incoming", c.incoming). WithField("incoming", c.incoming).
WithField("rulesVersion", f.rulesVersion). WithField("rulesVersion", f.rulesVersion).
@@ -494,8 +466,8 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
return false return false
} }
if f.l.Level >= logrus.DebugLevel { if l.Level >= logrus.DebugLevel {
h.logger(f.l). h.logger().
WithField("fwPacket", fp). WithField("fwPacket", fp).
WithField("incoming", c.incoming). WithField("incoming", c.incoming).
WithField("rulesVersion", f.rulesVersion). WithField("rulesVersion", f.rulesVersion).
@@ -507,14 +479,14 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
} }
switch fp.Protocol { switch fp.Protocol {
case firewall.ProtoTCP: case fwProtoTCP:
c.Expires = time.Now().Add(f.TCPTimeout) c.Expires = time.Now().Add(f.TCPTimeout)
if incoming { if incoming {
f.checkTCPRTT(c, packet) f.checkTCPRTT(c, packet)
} else { } else {
setTCPRTTTracking(c, packet) setTCPRTTTracking(c, packet)
} }
case firewall.ProtoUDP: case fwProtoUDP:
c.Expires = time.Now().Add(f.UDPTimeout) c.Expires = time.Now().Add(f.UDPTimeout)
default: default:
c.Expires = time.Now().Add(f.DefaultTimeout) c.Expires = time.Now().Add(f.DefaultTimeout)
@@ -522,24 +494,20 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
conntrack.Unlock() conntrack.Unlock()
if localCache != nil {
localCache[fp] = struct{}{}
}
return true return true
} }
func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) { func (f *Firewall) addConn(packet []byte, fp FirewallPacket, incoming bool) {
var timeout time.Duration var timeout time.Duration
c := &conn{} c := &conn{}
switch fp.Protocol { switch fp.Protocol {
case firewall.ProtoTCP: case fwProtoTCP:
timeout = f.TCPTimeout timeout = f.TCPTimeout
if !incoming { if !incoming {
setTCPRTTTracking(c, packet) setTCPRTTTracking(c, packet)
} }
case firewall.ProtoUDP: case fwProtoUDP:
timeout = f.UDPTimeout timeout = f.UDPTimeout
default: default:
timeout = f.DefaultTimeout timeout = f.DefaultTimeout
@@ -548,7 +516,6 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
conntrack := f.Conntrack conntrack := f.Conntrack
conntrack.Lock() conntrack.Lock()
if _, ok := conntrack.Conns[fp]; !ok { if _, ok := conntrack.Conns[fp]; !ok {
conntrack.TimerWheel.Advance(time.Now())
conntrack.TimerWheel.Add(fp, timeout) conntrack.TimerWheel.Add(fp, timeout)
} }
@@ -563,7 +530,7 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel // Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
// Caller must own the connMutex lock! // Caller must own the connMutex lock!
func (f *Firewall) evict(p firewall.Packet) { func (f *Firewall) evict(p FirewallPacket) {
//TODO: report a stat if the tcp rtt tracking was never resolved? //TODO: report a stat if the tcp rtt tracking was never resolved?
// Are we still tracking this conn? // Are we still tracking this conn?
conntrack := f.Conntrack conntrack := f.Conntrack
@@ -576,7 +543,6 @@ func (f *Firewall) evict(p firewall.Packet) {
// Timeout is in the future, re-add the timer // Timeout is in the future, re-add the timer
if newT > 0 { if newT > 0 {
conntrack.TimerWheel.Advance(time.Now())
conntrack.TimerWheel.Add(p, newT) conntrack.TimerWheel.Add(p, newT)
return return
} }
@@ -585,21 +551,21 @@ func (f *Firewall) evict(p firewall.Packet) {
delete(conntrack.Conns, p) delete(conntrack.Conns, p)
} }
func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { func (ft *FirewallTable) match(p FirewallPacket, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
if ft.AnyProto.match(p, incoming, c, caPool) { if ft.AnyProto.match(p, incoming, c, caPool) {
return true return true
} }
switch p.Protocol { switch p.Protocol {
case firewall.ProtoTCP: case fwProtoTCP:
if ft.TCP.match(p, incoming, c, caPool) { if ft.TCP.match(p, incoming, c, caPool) {
return true return true
} }
case firewall.ProtoUDP: case fwProtoUDP:
if ft.UDP.match(p, incoming, c, caPool) { if ft.UDP.match(p, incoming, c, caPool) {
return true return true
} }
case firewall.ProtoICMP: case fwProtoICMP:
if ft.ICMP.match(p, incoming, c, caPool) { if ft.ICMP.match(p, incoming, c, caPool) {
return true return true
} }
@@ -608,7 +574,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
return false return false
} }
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error { func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
if startPort > endPort { if startPort > endPort {
return fmt.Errorf("start port was lower than end port") return fmt.Errorf("start port was lower than end port")
} }
@@ -621,7 +587,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
} }
} }
if err := fp[i].addRule(groups, host, ip, localIp, caName, caSha); err != nil { if err := fp[i].addRule(groups, host, ip, caName, caSha); err != nil {
return err return err
} }
} }
@@ -629,7 +595,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
return nil return nil
} }
func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
// We don't have any allowed ports, bail // We don't have any allowed ports, bail
if fp == nil { if fp == nil {
return false return false
@@ -638,7 +604,7 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
var port int32 var port int32
if p.Fragment { if p.Fragment {
port = firewall.PortFragment port = fwPortFragment
} else if incoming { } else if incoming {
port = int32(p.LocalPort) port = int32(p.LocalPort)
} else { } else {
@@ -649,16 +615,15 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
return true return true
} }
return fp[firewall.PortAny].match(p, c, caPool) return fp[fwPortAny].match(p, c, caPool)
} }
func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error { func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error {
fr := func() *FirewallRule { fr := func() *FirewallRule {
return &FirewallRule{ return &FirewallRule{
Hosts: make(map[string]struct{}), Hosts: make(map[string]struct{}),
Groups: make([][]string, 0), Groups: make([][]string, 0),
CIDR: cidr.NewTree4(), CIDR: NewCIDRTree(),
LocalCIDR: cidr.NewTree4(),
} }
} }
@@ -667,14 +632,14 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
fc.Any = fr() fc.Any = fr()
} }
return fc.Any.addRule(groups, host, ip, localIp) return fc.Any.addRule(groups, host, ip)
} }
if caSha != "" { if caSha != "" {
if _, ok := fc.CAShas[caSha]; !ok { if _, ok := fc.CAShas[caSha]; !ok {
fc.CAShas[caSha] = fr() fc.CAShas[caSha] = fr()
} }
err := fc.CAShas[caSha].addRule(groups, host, ip, localIp) err := fc.CAShas[caSha].addRule(groups, host, ip)
if err != nil { if err != nil {
return err return err
} }
@@ -684,7 +649,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
if _, ok := fc.CANames[caName]; !ok { if _, ok := fc.CANames[caName]; !ok {
fc.CANames[caName] = fr() fc.CANames[caName] = fr()
} }
err := fc.CANames[caName].addRule(groups, host, ip, localIp) err := fc.CANames[caName].addRule(groups, host, ip)
if err != nil { if err != nil {
return err return err
} }
@@ -693,7 +658,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
return nil return nil
} }
func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { func (fc *FirewallCA) match(p FirewallPacket, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
if fc == nil { if fc == nil {
return false return false
} }
@@ -716,18 +681,17 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
return fc.CANames[s.Details.Name].match(p, c) return fc.CANames[s.Details.Name].match(p, c)
} }
func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, localIp *net.IPNet) error { func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) error {
if fr.Any { if fr.Any {
return nil return nil
} }
if fr.isAny(groups, host, ip, localIp) { if fr.isAny(groups, host, ip) {
fr.Any = true fr.Any = true
// If it's any we need to wipe out any pre-existing rules to save on memory // If it's any we need to wipe out any pre-existing rules to save on memory
fr.Groups = make([][]string, 0) fr.Groups = make([][]string, 0)
fr.Hosts = make(map[string]struct{}) fr.Hosts = make(map[string]struct{})
fr.CIDR = cidr.NewTree4() fr.CIDR = NewCIDRTree()
fr.LocalCIDR = cidr.NewTree4()
} else { } else {
if len(groups) > 0 { if len(groups) > 0 {
fr.Groups = append(fr.Groups, groups) fr.Groups = append(fr.Groups, groups)
@@ -740,17 +704,13 @@ func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, loc
if ip != nil { if ip != nil {
fr.CIDR.AddCIDR(ip, struct{}{}) fr.CIDR.AddCIDR(ip, struct{}{})
} }
if localIp != nil {
fr.LocalCIDR.AddCIDR(localIp, struct{}{})
}
} }
return nil return nil
} }
func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPNet) bool { func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool {
if len(groups) == 0 && host == "" && ip == nil && localIp == nil { if len(groups) == 0 && host == "" && ip == nil {
return true return true
} }
@@ -768,14 +728,10 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPN
return true return true
} }
if localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0)) {
return true
}
return false return false
} }
func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool { func (fr *FirewallRule) match(p FirewallPacket, c *cert.NebulaCertificate) bool {
if fr == nil { if fr == nil {
return false return false
} }
@@ -813,10 +769,6 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
return true return true
} }
if fr.LocalCIDR != nil && fr.LocalCIDR.Contains(p.LocalIP) != nil {
return true
}
// No host, group, or cidr matched, bye bye // No host, group, or cidr matched, bye bye
return false return false
} }
@@ -829,12 +781,11 @@ type rule struct {
Group string Group string
Groups []string Groups []string
Cidr string Cidr string
LocalCidr string
CAName string CAName string
CASha string CASha string
} }
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) { func convertRule(p interface{}, table string, i int) (rule, error) {
r := rule{} r := rule{}
m, ok := p.(map[interface{}]interface{}) m, ok := p.(map[interface{}]interface{})
@@ -855,7 +806,6 @@ func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, er
r.Proto = toString("proto", m) r.Proto = toString("proto", m)
r.Host = toString("host", m) r.Host = toString("host", m)
r.Cidr = toString("cidr", m) r.Cidr = toString("cidr", m)
r.LocalCidr = toString("local_cidr", m)
r.CAName = toString("ca_name", m) r.CAName = toString("ca_name", m)
r.CASha = toString("ca_sha", m) r.CASha = toString("ca_sha", m)
@@ -890,12 +840,12 @@ func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, er
func parsePort(s string) (startPort, endPort int32, err error) { func parsePort(s string) (startPort, endPort int32, err error) {
if s == "any" { if s == "any" {
startPort = firewall.PortAny startPort = fwPortAny
endPort = firewall.PortAny endPort = fwPortAny
} else if s == "fragment" { } else if s == "fragment" {
startPort = firewall.PortFragment startPort = fwPortFragment
endPort = firewall.PortFragment endPort = fwPortFragment
} else if strings.Contains(s, `-`) { } else if strings.Contains(s, `-`) {
sPorts := strings.SplitN(s, `-`, 2) sPorts := strings.SplitN(s, `-`, 2)
@@ -919,8 +869,8 @@ func parsePort(s string) (startPort, endPort int32, err error) {
startPort = int32(rStartPort) startPort = int32(rStartPort)
endPort = int32(rEndPort) endPort = int32(rEndPort)
if startPort == firewall.PortAny { if startPort == fwPortAny {
endPort = firewall.PortAny endPort = fwPortAny
} }
} else { } else {
@@ -935,7 +885,7 @@ func parsePort(s string) (startPort, endPort int32, err error) {
return return
} }
// TODO: write tests for these //TODO: write tests for these
func setTCPRTTTracking(c *conn, p []byte) { func setTCPRTTTracking(c *conn, p []byte) {
if c.Seq != 0 { if c.Seq != 0 {
return return
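To make the port forms handled by parsePort above concrete, a table-style test such as the following sketch captures the expected results. This is hypothetical (assumes the newer firewall.PortAny/firewall.PortFragment constants and the testing package, run from inside the package that defines parsePort):

	func TestParsePortForms(t *testing.T) {
		for _, tc := range []struct {
			in         string
			start, end int32
		}{
			{"any", firewall.PortAny, firewall.PortAny},
			{"fragment", firewall.PortFragment, firewall.PortFragment},
			{"443", 443, 443},
			{"80-90", 80, 90},
			{"0-90", firewall.PortAny, firewall.PortAny}, // a range starting at `any` collapses to `any`
		} {
			start, end, err := parsePort(tc.in)
			if err != nil || start != tc.start || end != tc.end {
				t.Errorf("parsePort(%q) = %v, %v, %v", tc.in, start, end, err)
			}
		}
	}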
View File
@@ -1,59 +0,0 @@
package firewall
import (
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
)
// ConntrackCache is used as a local routine cache to know if a given flow
// has been seen in the conntrack table.
type ConntrackCache map[Packet]struct{}
type ConntrackCacheTicker struct {
cacheV uint64
cacheTick atomic.Uint64
cache ConntrackCache
}
func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
if d == 0 {
return nil
}
c := &ConntrackCacheTicker{
cache: ConntrackCache{},
}
go c.tick(d)
return c
}
func (c *ConntrackCacheTicker) tick(d time.Duration) {
for {
time.Sleep(d)
c.cacheTick.Add(1)
}
}
// Get checks if the cache ticker has moved to the next version before returning
// the map. If it has moved, we reset the map.
func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
if c == nil {
return nil
}
if tick := c.cacheTick.Load(); tick != c.cacheV {
c.cacheV = tick
if ll := len(c.cache); ll > 0 {
if l.Level == logrus.DebugLevel {
l.WithField("len", ll).Debug("resetting conntrack cache")
}
c.cache = make(ConntrackCache, ll)
}
}
return c.cache
}
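A minimal sketch of how this per-routine cache is meant to be consumed, assuming a hypothetical packet loop and the newer Drop signature shown earlier in this diff (nextPacket, deliver, fw, caPool and logger are stand-ins, not real nebula identifiers):

	cacheTicker := firewall.NewConntrackCacheTicker(10 * time.Second) // one ticker per reader routine
	for {
		raw, fwPacket, hostinfo := nextPacket() // hypothetical: a decoded inbound packet
		// Get() returns the same map until the tick advances, then resets it.
		if err := fw.Drop(raw, fwPacket, true, hostinfo, caPool, cacheTicker.Get(logger)); err != nil {
			continue // the firewall rejected this packet
		}
		deliver(raw) // hypothetical
	}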
View File
@@ -1,62 +0,0 @@
package firewall
import (
"encoding/json"
"fmt"
"github.com/slackhq/nebula/iputil"
)
type m map[string]interface{}
const (
ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
ProtoTCP = 6
ProtoUDP = 17
ProtoICMP = 1
PortAny = 0 // Special value for matching `port: any`
PortFragment = -1 // Special value for matching `port: fragment`
)
type Packet struct {
LocalIP iputil.VpnIp
RemoteIP iputil.VpnIp
LocalPort uint16
RemotePort uint16
Protocol uint8
Fragment bool
}
func (fp *Packet) Copy() *Packet {
return &Packet{
LocalIP: fp.LocalIP,
RemoteIP: fp.RemoteIP,
LocalPort: fp.LocalPort,
RemotePort: fp.RemotePort,
Protocol: fp.Protocol,
Fragment: fp.Fragment,
}
}
func (fp Packet) MarshalJSON() ([]byte, error) {
var proto string
switch fp.Protocol {
case ProtoTCP:
proto = "tcp"
case ProtoICMP:
proto = "icmp"
case ProtoUDP:
proto = "udp"
default:
proto = fmt.Sprintf("unknown %v", fp.Protocol)
}
return json.Marshal(m{
"LocalIP": fp.LocalIP.String(),
"RemoteIP": fp.RemoteIP.String(),
"LocalPort": fp.LocalPort,
"RemotePort": fp.RemotePort,
"Protocol": proto,
"Fragment": fp.Fragment,
})
}
View File
@@ -11,17 +11,12 @@ import (
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestNewFirewall(t *testing.T) { func TestNewFirewall(t *testing.T) {
l := test.NewLogger()
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw := NewFirewall(time.Second, time.Minute, time.Hour, c)
conntrack := fw.Conntrack conntrack := fw.Conntrack
assert.NotNil(t, conntrack) assert.NotNil(t, conntrack)
assert.NotNil(t, conntrack.Conns) assert.NotNil(t, conntrack.Conns)
@@ -34,124 +29,130 @@ func TestNewFirewall(t *testing.T) {
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c) fw = NewFirewall(time.Second, time.Hour, time.Minute, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c) fw = NewFirewall(time.Hour, time.Second, time.Minute, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c) fw = NewFirewall(time.Hour, time.Minute, time.Second, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c) fw = NewFirewall(time.Minute, time.Hour, time.Second, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c) fw = NewFirewall(time.Minute, time.Second, time.Hour, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
} }
func TestFirewall_AddRule(t *testing.T) { func TestFirewall_AddRule(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw := NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.NotNil(t, fw.InRules) assert.NotNil(t, fw.InRules)
assert.NotNil(t, fw.OutRules) assert.NotNil(t, fw.OutRules)
_, ti, _ := net.ParseCIDR("1.2.3.4/32") _, ti, _ := net.ParseCIDR("1.2.3.4/32")
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoTCP, 1, 1, []string{}, "", nil, "", ""))
// An empty rule is any // An empty rule is any
assert.True(t, fw.InRules.TCP[1].Any.Any) assert.True(t, fw.InRules.TCP[1].Any.Any)
assert.Empty(t, fw.InRules.TCP[1].Any.Groups) assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts) assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.left)
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.right)
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.value)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "", ""))
assert.False(t, fw.InRules.UDP[1].Any.Any) assert.False(t, fw.InRules.UDP[1].Any.Any)
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1") assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts) assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.left)
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.right)
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.value)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoICMP, 1, 1, []string{}, "h1", nil, "", ""))
assert.False(t, fw.InRules.ICMP[1].Any.Any) assert.False(t, fw.InRules.ICMP[1].Any.Any)
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups) assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1") assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.left)
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.right)
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.value)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", "")) assert.Nil(t, fw.AddRule(false, fwProtoAny, 1, 1, []string{}, "", ti, "", ""))
assert.False(t, fw.OutRules.AnyProto[1].Any.Any) assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups) assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts) assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))) assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(ip2int(ti.IP)))
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", ""))
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name") assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha")) assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha"))
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha") assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
// Set any and clear fields // Set any and clear fields
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", "")) assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", ""))
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0]) assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1") assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))) assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(ip2int(ti.IP)))
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
// run twice just to make sure // run twice just to make sure
//TODO: these ANY rules should clear the CA firewall portion //TODO: these ANY rules should clear the CA firewall portion
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", "")) assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "any", nil, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups) assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts) assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.left)
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.right)
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.value)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", "")) assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "any", nil, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0") _, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", "")) assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "", anyIp, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
// Test error conditions // Test error conditions
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(time.Second, time.Minute, time.Hour, c)
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", "")) assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", ""))
assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", "")) assert.Error(t, fw.AddRule(true, fwProtoAny, 10, 0, []string{}, "", nil, "", ""))
} }
func TestFirewall_Drop(t *testing.T) { func TestFirewall_Drop(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := firewall.Packet{ p := FirewallPacket{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
LocalPort: 10, 10,
RemotePort: 90, 90,
Protocol: firewall.ProtoUDP, fwProtoUDP,
Fragment: false, false,
} }
ipNet := net.IPNet{ ipNet := net.IPNet{
@@ -172,53 +173,53 @@ func TestFirewall_Drop(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// Drop outbound // Drop outbound
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp), ErrNoMatchingRule)
// Allow inbound // Allow inbound
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp))
// Allow outbound because conntrack // Allow outbound because conntrack
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp))
// test remote mismatch // test remote mismatch
oldRemote := p.RemoteIP oldRemote := p.RemoteIP
p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10)) p.RemoteIP = ip2int(net.IPv4(1, 2, 3, 10))
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp), ErrInvalidRemoteIP)
p.RemoteIP = oldRemote p.RemoteIP = oldRemote
// ensure signer doesn't get in the way of group checks // ensure signer doesn't get in the way of group checks
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum"))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad"))
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp), ErrNoMatchingRule)
// test caSha doesn't drop on match // test caSha doesn't drop on match
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad"))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum"))
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp))
// ensure ca name doesn't get in the way of group checks // ensure ca name doesn't get in the way of group checks
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", ""))
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp), ErrNoMatchingRule)
// test caName doesn't drop on match // test caName doesn't drop on match
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", ""))
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp))
} }
func BenchmarkFirewallTable_match(b *testing.B) { func BenchmarkFirewallTable_match(b *testing.B) {
@@ -227,24 +228,24 @@ func BenchmarkFirewallTable_match(b *testing.B) {
} }
_, n, _ := net.ParseCIDR("172.1.1.1/32") _, n, _ := net.ParseCIDR("172.1.1.1/32")
_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, "", "")
cp := cert.NewCAPool() cp := cert.NewCAPool()
b.Run("fail on proto", func(b *testing.B) { b.Run("fail on proto", func(b *testing.B) {
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoUDP}, true, c, cp)
} }
}) })
b.Run("fail on port", func(b *testing.B) { b.Run("fail on port", func(b *testing.B) {
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 1}, true, c, cp)
} }
}) })
@@ -258,7 +259,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
@@ -270,7 +271,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
@@ -282,12 +283,12 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
b.Run("pass on ip", func(b *testing.B) { b.Run("pass on ip", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) ip := ip2int(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, InvertedGroups: map[string]struct{}{"nope": {}},
@@ -295,27 +296,14 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
} }
}) })
b.Run("pass on local ip", func(b *testing.B) { _ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "")
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host",
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
}
})
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
b.Run("pass on ip with any port", func(b *testing.B) { b.Run("pass on ip with any port", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) ip := ip2int(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, InvertedGroups: map[string]struct{}{"nope": {}},
@@ -323,36 +311,24 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp) ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
}
})
b.Run("pass on local ip with any port", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host",
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
} }
}) })
} }
func TestFirewall_Drop2(t *testing.T) { func TestFirewall_Drop2(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := firewall.Packet{ p := FirewallPacket{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
LocalPort: 10, 10,
RemotePort: 90, 90,
Protocol: firewall.ProtoUDP, fwProtoUDP,
Fragment: false, false,
} }
ipNet := net.IPNet{ ipNet := net.IPNet{
@@ -371,7 +347,7 @@ func TestFirewall_Drop2(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
@@ -389,29 +365,30 @@ func TestFirewall_Drop2(t *testing.T) {
} }
h1.CreateRemoteCIDR(&c1) h1.CreateRemoteCIDR(&c1)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// h1/c1 lacks the proper groups // h1/c1 lacks the proper groups
assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule) assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp), ErrNoMatchingRule)
// c has the proper groups // c has the proper groups
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp))
} }
func TestFirewall_Drop3(t *testing.T) { func TestFirewall_Drop3(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := firewall.Packet{ p := FirewallPacket{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
LocalPort: 1, 1,
RemotePort: 1, 1,
Protocol: firewall.ProtoUDP, fwProtoUDP,
Fragment: false, false,
} }
ipNet := net.IPNet{ ipNet := net.IPNet{
@@ -437,7 +414,7 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c1, peerCert: &c1,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
} }
h1.CreateRemoteCIDR(&c1) h1.CreateRemoteCIDR(&c1)
@@ -452,7 +429,7 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c2, peerCert: &c2,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
} }
h2.CreateRemoteCIDR(&c2) h2.CreateRemoteCIDR(&c2)
@@ -467,37 +444,38 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c3, peerCert: &c3,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
} }
h3.CreateRemoteCIDR(&c3) h3.CreateRemoteCIDR(&c3)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 1, 1, []string{}, "host1", nil, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha"))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// c1 should pass because host match // c1 should pass because host match
assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp))
// c2 should pass because ca sha match // c2 should pass because ca sha match
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp))
// c3 should fail because no match // c3 should fail because no match
resetConntrack(fw) resetConntrack(fw)
assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp), ErrNoMatchingRule)
} }
func TestFirewall_DropConntrackReload(t *testing.T) { func TestFirewall_DropConntrackReload(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := firewall.Packet{ p := FirewallPacket{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), ip2int(net.IPv4(1, 2, 3, 4)),
LocalPort: 10, 10,
RemotePort: 90, 90,
Protocol: firewall.ProtoUDP, fwProtoUDP,
Fragment: false, false,
} }
ipNet := net.IPNet{ ipNet := net.IPNet{
@@ -518,39 +496,39 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), hostId: ip2int(ipNet.IP),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// Drop outbound // Drop outbound
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp), ErrNoMatchingRule)
// Allow inbound // Allow inbound
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp))
// Allow outbound because conntrack // Allow outbound because conntrack
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp))
oldFw := fw oldFw := fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 10, 10, []string{"any"}, "", nil, "", ""))
fw.Conntrack = oldFw.Conntrack fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1 fw.rulesVersion = oldFw.rulesVersion + 1
// Allow outbound because conntrack and new rules allow port 10 // Allow outbound because conntrack and new rules allow port 10
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp))
oldFw = fw oldFw = fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, fwProtoAny, 11, 11, []string{"any"}, "", nil, "", ""))
fw.Conntrack = oldFw.Conntrack fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1 fw.rulesVersion = oldFw.rulesVersion + 1
// Drop outbound because conntrack doesn't match new ruleset // Drop outbound because conntrack doesn't match new ruleset
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp), ErrNoMatchingRule)
} }
func BenchmarkLookup(b *testing.B) { func BenchmarkLookup(b *testing.B) {
@@ -669,147 +647,124 @@ func Test_parsePort(t *testing.T) {
} }
func TestNewFirewallFromConfig(t *testing.T) { func TestNewFirewallFromConfig(t *testing.T) {
l := test.NewLogger()
// Test a bad rule definition // Test a bad rule definition
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
conf := config.NewC(l) conf := NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
_, err := NewFirewallFromConfig(l, c, conf) _, err := NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules") assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")
// Test both port and code // Test both port and code
conf = config.NewC(l) conf = NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided") assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")
// Test missing host, group, cidr, ca_name and ca_sha // Test missing host, group, cidr, ca_name and ca_sha
conf = config.NewC(l) conf = NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided") assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided")
// Test code/port error // Test code/port error
conf = config.NewC(l) conf = NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`") assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`") assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")
// Test proto error // Test proto error
conf = config.NewC(l) conf = NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``") assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")
// Test cidr parse error // Test cidr parse error
conf = config.NewC(l) conf = NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh") assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
// Test local_cidr parse error
conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh")
// Test both group and groups // Test both group and groups
conf = config.NewC(l) conf = NewConfig()
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(c, conf)
assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided") assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
} }
func TestAddFirewallRulesFromConfig(t *testing.T) { func TestAddFirewallRulesFromConfig(t *testing.T) {
l := test.NewLogger()
// Test adding tcp rule // Test adding tcp rule
conf := config.NewC(l) conf := NewConfig()
mf := &mockFirewall{} mf := &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding udp rule // Test adding udp rule
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding icmp rule // Test adding icmp rule
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding any rule // Test adding any rule
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding rule with cidr
cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall)
// Test adding rule with local_cidr
conf = config.NewC(l)
mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall)
// Test adding rule with ca_sha // Test adding rule with ca_sha
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall)
// Test adding rule with ca_name // Test adding rule with ca_name
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall)
// Test single group // Test single group
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
// Test single groups // Test single groups
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
// Test multiple AND groups // Test multiple AND groups
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall)
// Test Add error // Test Add error
conf = config.NewC(l) conf = NewConfig()
mf = &mockFirewall{} mf = &mockFirewall{}
mf.nextCallReturn = errors.New("test error") mf.nextCallReturn = errors.New("test error")
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`") assert.EqualError(t, AddFirewallRulesFromConfig(true, conf, mf), "firewall.inbound rule #0; `test error`")
} }
func TestTCPRTTTracking(t *testing.T) { func TestTCPRTTTracking(t *testing.T) {
@@ -904,16 +859,17 @@ func TestTCPRTTTracking(t *testing.T) {
} }
func TestFirewall_convertRule(t *testing.T) { func TestFirewall_convertRule(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
// Ensure group array of 1 is converted and a warning is printed // Ensure group array of 1 is converted and a warning is printed
c := map[interface{}]interface{}{ c := map[interface{}]interface{}{
"group": []interface{}{"group1"}, "group": []interface{}{"group1"},
} }
r, err := convertRule(l, c, "test", 1) r, err := convertRule(c, "test", 1)
assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value") assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value")
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, "group1", r.Group) assert.Equal(t, "group1", r.Group)
@@ -924,7 +880,7 @@ func TestFirewall_convertRule(t *testing.T) {
"group": []interface{}{"group1", "group2"}, "group": []interface{}{"group1", "group2"},
} }
r, err = convertRule(l, c, "test", 1) r, err = convertRule(c, "test", 1)
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided") assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
@@ -934,7 +890,7 @@ func TestFirewall_convertRule(t *testing.T) {
"group": "group1", "group": "group1",
} }
r, err = convertRule(l, c, "test", 1) r, err = convertRule(c, "test", 1)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, "group1", r.Group) assert.Equal(t, "group1", r.Group)
} }
@@ -947,7 +903,6 @@ type addRuleCall struct {
groups []string groups []string
host string host string
ip *net.IPNet ip *net.IPNet
localIp *net.IPNet
caName string caName string
caSha string caSha string
} }
@@ -957,7 +912,7 @@ type mockFirewall struct {
nextCallReturn error nextCallReturn error
} }
func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error { func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
mf.lastCall = addRuleCall{ mf.lastCall = addRuleCall{
incoming: incoming, incoming: incoming,
proto: proto, proto: proto,
@@ -966,7 +921,6 @@ func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, end
groups: groups, groups: groups,
host: host, host: host,
ip: ip, ip: ip,
localIp: localIp,
caName: caName, caName: caName,
caSha: caSha, caSha: caSha,
} }
@@ -978,6 +932,6 @@ func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, end
func resetConntrack(fw *Firewall) { func resetConntrack(fw *Firewall) {
fw.Conntrack.Lock() fw.Conntrack.Lock()
fw.Conntrack.Conns = map[firewall.Packet]*conn{} fw.Conntrack.Conns = map[FirewallPacket]*conn{}
fw.Conntrack.Unlock() fw.Conntrack.Unlock()
} }
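
The left-hand column of the test diff above captures the two signature changes this compare spans: NewFirewall now takes a *logrus.Logger as its first argument, and AddRule (plus the lower-level addRule) accepts an extra *net.IPNet, the local_cidr match, alongside the existing remote cidr. A minimal sketch of the newer call shape follows, assuming the Firewall type still lives in the root github.com/slackhq/nebula package as these tests imply; the addresses and ports are illustrative only.

package main

import (
	"net"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/firewall"
)

func main() {
	l := logrus.New()

	// The certificate normally comes from the node's own cert; an empty one
	// is enough to construct a Firewall for illustration, as the tests do.
	fw := nebula.NewFirewall(l, time.Second, time.Minute, time.Hour, &cert.NebulaCertificate{})

	// Allow inbound TCP/443, but only when the packet's local (destination)
	// address falls inside 10.0.0.0/8 -- the new localIp / local_cidr argument.
	_, localNet, _ := net.ParseCIDR("10.0.0.0/8")
	if err := fw.AddRule(true, firewall.ProtoTCP, 443, 443, []string{"any"}, "", nil, localNet, "", ""); err != nil {
		l.Fatal(err)
	}
}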

go.mod (68 changed lines)

@@ -1,49 +1,33 @@
module github.com/slackhq/nebula module github.com/slackhq/nebula
go 1.20 go 1.12
require ( require (
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239
github.com/armon/go-radix v1.0.0 github.com/armon/go-radix v1.0.0
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.0.0 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/gogo/protobuf v1.3.2 github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6
github.com/google/gopacket v1.1.19 github.com/golang/protobuf v1.3.2
github.com/imdario/mergo v0.3.15 github.com/imdario/mergo v0.3.8
github.com/kardianos/service v1.2.2 github.com/kardianos/service v1.1.0
github.com/miekg/dns v1.1.54 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f github.com/kr/pretty v0.1.0 // indirect
github.com/prometheus/client_golang v1.15.1 github.com/miekg/dns v1.1.25
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c
github.com/sirupsen/logrus v1.9.0 github.com/prometheus/client_golang v1.2.1
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee // indirect
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 github.com/prometheus/procfs v0.0.8 // indirect
github.com/stretchr/testify v1.8.2 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563
github.com/vishvananda/netlink v1.1.0 github.com/sirupsen/logrus v1.4.2
golang.org/x/crypto v0.8.0 github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 github.com/stretchr/testify v1.6.1
golang.org/x/net v0.9.0 github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a
golang.org/x/sys v0.8.0 github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect
golang.org/x/term v0.8.0 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
golang.zx2c4.com/wireguard/windows v0.5.3 golang.org/x/sys v0.0.0-20191210023423-ac6580df4449
google.golang.org/protobuf v1.30.0 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.2.7
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

go.sum (239 changed lines)

@@ -1,249 +1,158 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
[go.sum — remaining dependency checksum entries. The two sides of this comparison pin different module versions: the side carrying these commits uses the releases it was built against in 2019/2020 (for example github.com/golang/protobuf v1.3.x, github.com/prometheus/client_golang v1.2.1, github.com/miekg/dns v1.1.25, github.com/kardianos/service v1.1.0, gopkg.in/yaml.v2 v2.2.7), while the other side carries the newer releases (for example github.com/golang/protobuf v1.5.3, github.com/prometheus/client_golang v1.15.1, github.com/miekg/dns v1.1.54, github.com/kardianos/service v1.2.2, gopkg.in/yaml.v2 v2.4.0). The full checksum listing is omitted here.]

handler.go (new file, 83 lines)

@@ -0,0 +1,83 @@
package nebula

// newHook wraps a user supplied writer so it can be used as an InsideHandler.
// Decrypted message payloads are handed to w instead of the tun device.
func (f *Interface) newHook(w func([]byte) error) InsideHandler {
	fn := func(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
		f.decryptTo(w, hostInfo, header.MessageCounter, out, packet, fwPacket, nb)
	}
	return f.encrypted(fn)
}

// encrypted wraps a handler with the checks shared by every encrypted packet
// type: verify the connection state first, then handle host roaming and
// connection manager bookkeeping after the handler has run.
func (f *Interface) encrypted(h InsideHandler) InsideHandler {
	return func(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
		if !f.handleEncrypted(ci, addr, header) {
			return
		}

		h(hostInfo, ci, addr, header, out, packet, fwPacket, nb)

		f.handleHostRoaming(hostInfo, addr)
		f.connectionManager.In(hostInfo.hostId)
	}
}

// rxMetrics wraps a handler so the received packet is counted before the
// handler runs.
func (f *Interface) rxMetrics(h InsideHandler) InsideHandler {
	return func(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
		f.messageMetrics.Rx(header.Type, header.Subtype, 1)
		h(hostInfo, ci, addr, header, out, packet, fwPacket, nb)
	}
}

// handleMessagePacket decrypts a message packet and writes it to the tun device.
func (f *Interface) handleMessagePacket(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
	f.decryptTo(f.inside.WriteRaw, hostInfo, header.MessageCounter, out, packet, fwPacket, nb)
}

func (f *Interface) handleLighthousePacket(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
	d, err := f.decrypt(hostInfo, header.MessageCounter, out, packet, header, nb)
	if err != nil {
		hostInfo.logger().WithError(err).WithField("udpAddr", addr).
			WithField("packet", packet).
			Error("Failed to decrypt lighthouse packet")

		//TODO: maybe after build 64 is out? 06/14/2018 - NB
		//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
		return
	}

	f.lightHouse.HandleRequest(addr, hostInfo.hostId, d, hostInfo.GetCert(), f)
}

func (f *Interface) handleTestPacket(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
	d, err := f.decrypt(hostInfo, header.MessageCounter, out, packet, header, nb)
	if err != nil {
		hostInfo.logger().WithError(err).WithField("udpAddr", addr).
			WithField("packet", packet).
			Error("Failed to decrypt test packet")

		//TODO: maybe after build 64 is out? 06/14/2018 - NB
		//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
		return
	}

	if header.Subtype == testRequest {
		// This testRequest might be from TryPromoteBest, so we should roam
		// to the new IP address before responding
		f.handleHostRoaming(hostInfo, addr)
		f.send(test, testReply, ci, hostInfo, hostInfo.remote, d, nb, out)
	}
}

func (f *Interface) handleHandshakePacket(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
	HandleIncomingHandshake(f, addr, packet, header, hostInfo)
}

func (f *Interface) handleRecvErrorPacket(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
	// TODO: Remove this with recv_error deprecation
	f.handleRecvError(addr, header)
}

func (f *Interface) handleCloseTunnelPacket(hostInfo *HostInfo, ci *ConnectionState, addr *udpAddr, header *Header, out []byte, packet []byte, fwPacket *FirewallPacket, nb []byte) {
	hostInfo.logger().WithField("udpAddr", addr).
		Info("Close tunnel received, tearing down.")

	f.closeTunnel(hostInfo)
}


@@ -1,31 +1,36 @@
 package nebula
 
-import (
-	"github.com/slackhq/nebula/header"
-	"github.com/slackhq/nebula/udp"
-)
+const (
+	handshakeIXPSK0 = 0
+	handshakeXXPSK0 = 1
+)
 
-func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H, hostinfo *HostInfo) {
-	// First remote allow list check before we know the vpnIp
-	if addr != nil {
-		if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
-			f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
-			return
-		}
-	}
+func HandleIncomingHandshake(f *Interface, addr *udpAddr, packet []byte, h *Header, hostinfo *HostInfo) {
+	newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
+	//TODO: For stage 1 we won't have hostinfo yet but stage 2 and above would require it, this check may be helpful in those cases
+	//if err != nil {
+	//	l.WithError(err).WithField("udpAddr", addr).Error("Error while finding host info for handshake message")
+	//	return
+	//}
+
+	if !f.lightHouse.remoteAllowList.Allow(udp2ipInt(addr)) {
+		l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
+		return
+	}
 
+	tearDown := false
 	switch h.Subtype {
-	case header.HandshakeIXPSK0:
+	case handshakeIXPSK0:
 		switch h.MessageCounter {
 		case 1:
-			ixHandshakeStage1(f, addr, via, packet, h)
+			tearDown = ixHandshakeStage1(f, addr, newHostinfo, packet, h)
 		case 2:
-			newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
-			tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
-			if tearDown && newHostinfo != nil {
-				f.handshakeManager.DeleteHostInfo(newHostinfo)
-			}
+			tearDown = ixHandshakeStage2(f, addr, newHostinfo, packet, h)
 		}
 	}
+
+	if tearDown && newHostinfo != nil {
+		f.handshakeManager.DeleteIndex(newHostinfo.localIndexId)
+		f.handshakeManager.DeleteVpnIP(newHostinfo.hostId)
+	}
 }
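Both the handler map and the handshake dispatch above branch on fields of the fixed-size nebula packet header (version/type, subtype, remote index, message counter) that HeaderEncode packs on the wire. The self-contained sketch below shows what a fixed-layout encode/decode of that shape looks like; the exact bit widths and offsets used here are an assumption for illustration, not taken from this diff.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const headerLen = 16

// encodeHeader packs version/type, subtype, a reserved field, the remote index
// and the message counter into a fixed 16-byte header. The layout (4-bit
// version, 4-bit type, 1-byte subtype, 2 reserved bytes, 4-byte big-endian
// remote index, 8-byte big-endian counter) is assumed for illustration.
func encodeHeader(b []byte, version, msgType, subtype uint8, remoteIndex uint32, counter uint64) []byte {
	b = b[:headerLen]
	b[0] = version<<4 | (msgType & 0x0f)
	b[1] = subtype
	b[2], b[3] = 0, 0 // reserved
	binary.BigEndian.PutUint32(b[4:8], remoteIndex)
	binary.BigEndian.PutUint64(b[8:16], counter)
	return b
}

func decodeHeader(b []byte) (version, msgType, subtype uint8, remoteIndex uint32, counter uint64) {
	return b[0] >> 4, b[0] & 0x0f, b[1], binary.BigEndian.Uint32(b[4:8]), binary.BigEndian.Uint64(b[8:16])
}

func main() {
	h := encodeHeader(make([]byte, headerLen), 1, 0, 0, 42, 2)
	v, t, s, ri, c := decodeHeader(h)
	fmt.Println(v, t, s, ri, c) // 1 0 0 42 2
}
```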


@@ -1,38 +1,42 @@
 package nebula
 
 import (
-	"bytes"
-	"sync/atomic"
 	"time"
 
 	"github.com/flynn/noise"
-	"github.com/slackhq/nebula/header"
-	"github.com/slackhq/nebula/iputil"
-	"github.com/slackhq/nebula/udp"
+	"github.com/golang/protobuf/proto"
 )
 
 // NOISE IX Handshakes
 
 // This function constructs a handshake packet, but does not actually send it
 // Sending is done by the handshake manager
-func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
+func ixHandshakeStage0(f *Interface, vpnIp uint32, hostinfo *HostInfo) {
 	// This queries the lighthouse if we don't know a remote for the host
-	// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
-	// more quickly, effect is a quicker handshake.
 	if hostinfo.remote == nil {
-		f.lightHouse.QueryServer(vpnIp, f)
+		ips, err := f.lightHouse.Query(vpnIp, f)
+		if err != nil {
+			//l.Debugln(err)
+		}
+		for _, ip := range ips {
+			hostinfo.AddRemote(ip)
+		}
 	}
 
-	err := f.handshakeManager.AddIndexHostInfo(hostinfo)
+	myIndex, err := generateIndex()
 	if err != nil {
-		f.l.WithError(err).WithField("vpnIp", vpnIp).
+		l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
 			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
 		return
 	}
 
 	ci := hostinfo.ConnectionState
+	f.handshakeManager.AddIndexHostInfo(myIndex, hostinfo)
 
 	hsProto := &NebulaHandshakeDetails{
-		InitiatorIndex: hostinfo.localIndexId,
-		Time:           uint64(time.Now().UnixNano()),
+		InitiatorIndex: myIndex,
+		Time:           uint64(time.Now().Unix()),
 		Cert:           ci.certState.rawCertificateNoKey,
 	}
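ixHandshakeStage0 through ixHandshakeStage2 drive a Noise IX exchange through ci.H, a flynn/noise HandshakeState: the initiator writes handshake message 1 with its details as the payload, the responder replies with message 2, and both sides then hold the symmetric cipher states used for data packets. A minimal standalone sketch of that two-message exchange follows; it assumes flynn/noise v1.0.0 (as pinned in go.sum), picks an arbitrary cipher suite, and skips the certificate payloads and error handling that the real code performs.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/flynn/noise"
)

func main() {
	cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)

	iStatic, _ := cs.GenerateKeypair(rand.Reader)
	rStatic, _ := cs.GenerateKeypair(rand.Reader)

	initiator, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Pattern: noise.HandshakeIX, Initiator: true, StaticKeypair: iStatic,
	})
	responder, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Pattern: noise.HandshakeIX, Initiator: false, StaticKeypair: rStatic,
	})

	// Stage 0/1: the initiator builds the first handshake message, carrying a
	// payload (nebula puts the marshalled NebulaHandshakeDetails here).
	msg1, _, _, _ := initiator.WriteMessage(nil, []byte("stage 1 payload"))
	p1, _, _, _ := responder.ReadMessage(nil, msg1)

	// Stage 2: the responder replies; IX completes after two messages and both
	// calls now return the send/receive cipher states.
	msg2, rCS1, rCS2, _ := responder.WriteMessage(nil, []byte("stage 2 payload"))
	p2, iCS1, iCS2, _ := initiator.ReadMessage(nil, msg2)

	fmt.Println(string(p1), string(p2))
	fmt.Println("cipher states derived:", rCS1 != nil && rCS2 != nil && iCS1 != nil && iCS2 != nil)
}
```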
@@ -41,337 +45,243 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
hs := &NebulaHandshake{ hs := &NebulaHandshake{
Details: hsProto, Details: hsProto,
} }
hsBytes, err = hs.Marshal() hsBytes, err = proto.Marshal(hs)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp). l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return return
} }
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1) header := HeaderEncode(make([]byte, HeaderLen), Version, uint8(handshake), handshakeIXPSK0, 0, 1)
ci.messageCounter.Add(1) atomic.AddUint64(ci.messageCounter, 1)
msg, _, _, err := ci.H.WriteMessage(h, hsBytes) msg, _, _, err := ci.H.WriteMessage(header, hsBytes)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp). l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return return
} }
// We are sending handshake packet 1, so we don't expect to receive
// handshake packet 1 from the responder
ci.window.Update(f.l, 1)
hostinfo.HandshakePacket[0] = msg hostinfo.HandshakePacket[0] = msg
hostinfo.HandshakeReady = true hostinfo.HandshakeReady = true
hostinfo.handshakeStart = time.Now() hostinfo.handshakeStart = time.Now()
} }
func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) { func ixHandshakeStage1(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet []byte, h *Header) bool {
ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0) var ip uint32
if h.RemoteIndex == 0 {
ci := f.newConnectionState(false, noise.HandshakeIX, []byte{}, 0)
// Mark packet 1 as seen so it doesn't show up as missed // Mark packet 1 as seen so it doesn't show up as missed
ci.window.Update(f.l, 1) ci.window.Update(1)
msg, _, _, err := ci.H.ReadMessage(nil, packet[header.Len:]) msg, _, _, err := ci.H.ReadMessage(nil, packet[HeaderLen:])
if err != nil { if err != nil {
f.l.WithError(err).WithField("udpAddr", addr). l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage") WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage")
return return true
} }
hs := &NebulaHandshake{} hs := &NebulaHandshake{}
err = hs.Unmarshal(msg) err = proto.Unmarshal(msg, hs)
/* /*
l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex) l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex)
*/ */
if err != nil || hs.Details == nil { if err != nil || hs.Details == nil {
f.l.WithError(err).WithField("udpAddr", addr). l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message") WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
return return true
} }
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool) hostinfo, _ := f.handshakeManager.pendingHostMap.QueryReverseIndex(hs.Details.InitiatorIndex)
if hostinfo != nil && bytes.Equal(hostinfo.HandshakePacket[0], packet[HeaderLen:]) {
if msg, ok := hostinfo.HandshakePacket[2]; ok {
f.messageMetrics.Tx(handshake, NebulaMessageSubType(msg[1]), 1)
err := f.outside.WriteTo(msg, addr)
if err != nil { if err != nil {
f.l.WithError(err).WithField("udpAddr", addr). l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
WithError(err).Error("Failed to send handshake message")
} else {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
Info("Handshake message sent")
}
return false
}
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cached", true).
WithField("packets", hostinfo.HandshakePacket).
Error("Seen this handshake packet already but don't have a cached packet to return")
}
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert)
if err != nil {
l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
Info("Invalid certificate from host") Info("Invalid certificate from host")
return return true
} }
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP) vpnIP := ip2int(remoteCert.Details.Ips[0].IP)
certName := remoteCert.Details.Name certName := remoteCert.Details.Name
fingerprint, _ := remoteCert.Sha256Sum() fingerprint, _ := remoteCert.Sha256Sum()
issuer := remoteCert.Details.Issuer
if vpnIp == f.myVpnIp { myIndex, err := generateIndex()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself")
return
}
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}
myIndex, err := generateIndex(f.l)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr). l.WithError(err).WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index") WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
return return true
} }
hostinfo := &HostInfo{ hostinfo, err = f.handshakeManager.AddIndex(myIndex, ci)
ConnectionState: ci, if err != nil {
localIndexId: myIndex, l.WithError(err).WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
remoteIndexId: hs.Details.InitiatorIndex, WithField("certName", certName).
vpnIp: vpnIp, WithField("fingerprint", fingerprint).
HandshakePacket: make(map[uint8][]byte, 0), WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Error adding index to connection manager")
lastHandshakeTime: hs.Details.Time,
relayState: RelayState{ return true
relays: map[iputil.VpnIp]struct{}{}, }
relayForByIp: map[iputil.VpnIp]*Relay{}, l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
relayForByIdx: map[uint32]*Relay{},
},
}
hostinfo.Lock()
defer hostinfo.Unlock()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message received") Info("Handshake message received")
hostinfo.remoteIndexId = hs.Details.InitiatorIndex
hs.Details.ResponderIndex = myIndex hs.Details.ResponderIndex = myIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey hs.Details.Cert = ci.certState.rawCertificateNoKey
// Update the time in case their clock is way off from ours
hs.Details.Time = uint64(time.Now().UnixNano())
hsBytes, err := hs.Marshal() hsBytes, err := proto.Marshal(hs)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message") WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return return true
} }
nh := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, hs.Details.InitiatorIndex, 2) header := HeaderEncode(make([]byte, HeaderLen), Version, uint8(handshake), handshakeIXPSK0, hs.Details.InitiatorIndex, 2)
msg, dKey, eKey, err := ci.H.WriteMessage(nh, hsBytes) msg, dKey, eKey, err := ci.H.WriteMessage(header, hsBytes)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage") WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return return true
} else if dKey == nil || eKey == nil {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Noise did not arrive at a key")
return
} }
hostinfo.HandshakePacket[0] = make([]byte, len(packet[header.Len:])) if f.hostMap.CheckHandshakeCompleteIP(vpnIP) && vpnIP < ip2int(f.certState.certificate.Details.Ips[0].IP) {
copy(hostinfo.HandshakePacket[0], packet[header.Len:]) l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Prevented a handshake race")
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
f.SendMessageToVpnIp(test, testRequest, vpnIP, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
return true
}
hostinfo.HandshakePacket[0] = make([]byte, len(packet[HeaderLen:]))
copy(hostinfo.HandshakePacket[0], packet[HeaderLen:])
// Regardless of whether you are the sender or receiver, you should arrive here // Regardless of whether you are the sender or receiver, you should arrive here
// and complete standing up the connection. // and complete standing up the connection.
if dKey != nil && eKey != nil {
hostinfo.HandshakePacket[2] = make([]byte, len(msg)) hostinfo.HandshakePacket[2] = make([]byte, len(msg))
copy(hostinfo.HandshakePacket[2], msg) copy(hostinfo.HandshakePacket[2], msg)
// We are sending handshake packet 2, so we don't expect to receive f.messageMetrics.Tx(handshake, NebulaMessageSubType(msg[1]), 1)
// handshake packet 2 from the initiator.
ci.window.Update(f.l, 2)
ci.peerCert = remoteCert
ci.dKey = NewNebulaCipherState(dKey)
ci.eKey = NewNebulaCipherState(eKey)
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
hostinfo.SetRemote(addr)
hostinfo.CreateRemoteCIDR(remoteCert)
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
if err != nil {
switch err {
case ErrAlreadySeen:
// Update remote if preferred (Note we have to switch to locking
// the existing hostinfo, and then switch back so the defer Unlock
// higher in this function still works)
hostinfo.Unlock()
existing.Lock()
// Update remote if preferred
if existing.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
existing.Unlock()
hostinfo.Lock()
msg = existing.HandshakePacket[2]
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
if addr != nil {
err := f.outside.WriteTo(msg, addr) err := f.outside.WriteTo(msg, addr)
if err != nil { if err != nil {
f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr). l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
WithError(err).Error("Failed to send handshake message")
} else {
f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
Info("Handshake message sent")
}
return
} else {
if via == nil {
f.l.Error("Handshake send failed: both addr and via are nil.")
return
}
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via.relayHI.vpnIp).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
Info("Handshake message sent")
return
}
case ErrExistingHostInfo:
// This means there was an existing tunnel and this handshake was older than the one we are currently based on
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("oldHandshakeTime", existing.lastHandshakeTime).
WithField("newHandshakeTime", hostinfo.lastHandshakeTime).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake too old")
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
return
case ErrLocalIndexCollision:
// This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
Error("Failed to add HostInfo due to localIndex collision")
return
default:
// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
// And we forget to update it here
f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Failed to add HostInfo to HostMap")
return
}
}
// Do the send
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
if addr != nil {
err = f.outside.WriteTo(msg, addr)
if err != nil {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake") WithError(err).Error("Failed to send handshake")
} else { } else {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent") Info("Handshake message sent")
} }
ip = ip2int(remoteCert.Details.Ips[0].IP)
ci.peerCert = remoteCert
ci.dKey = NewNebulaCipherState(dKey)
ci.eKey = NewNebulaCipherState(eKey)
//l.Debugln("got symmetric pairs")
//hostinfo.ClearRemotes()
hostinfo.AddRemote(*addr)
hostinfo.CreateRemoteCIDR(remoteCert)
f.lightHouse.AddRemoteAndReset(ip, addr)
if f.serveDns {
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
}
ho, err := f.hostMap.QueryVpnIP(vpnIP)
if err == nil && ho.localIndexId != 0 {
l.WithField("vpnIp", vpnIP).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("action", "removing stale index").
WithField("index", ho.localIndexId).
Debug("Handshake processing")
f.hostMap.DeleteIndex(ho.localIndexId)
}
f.hostMap.AddIndexHostInfo(hostinfo.localIndexId, hostinfo)
f.hostMap.AddVpnIPHostInfo(vpnIP, hostinfo)
hostinfo.handshakeComplete()
} else {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
if via == nil {
f.l.Error("Handshake send failed: both addr and via are nil.")
return
}
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
WithField("issuer", issuer). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). Error("Noise did not arrive at a key")
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
return
}
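Editor's note: the stage 1 completion path above dispatches on sentinel errors (ErrAlreadySeen, ErrExistingHostInfo, ErrLocalIndexCollision) returned by CheckAndComplete. A minimal, self-contained sketch of that sentinel-error pattern is below; the addTunnel function and its error names are illustrative and are not part of nebula.

package main

import (
	"errors"
	"fmt"
)

// Illustrative sentinel errors, mirroring the style of ErrAlreadySeen / ErrLocalIndexCollision above.
var (
	errAlreadySeen    = errors.New("already seen")
	errIndexCollision = errors.New("local index collision")
)

// addTunnel is a hypothetical stand-in for a CheckAndComplete-style call.
func addTunnel(id int) error {
	switch id {
	case 1:
		return errAlreadySeen
	case 2:
		return errIndexCollision
	}
	return nil
}

func main() {
	for id := 0; id < 3; id++ {
		switch err := addTunnel(id); err {
		case nil:
			fmt.Println(id, "tunnel added")
		case errAlreadySeen:
			fmt.Println(id, "duplicate handshake packet, dropping")
		case errIndexCollision:
			fmt.Println(id, "local index collision, letting the next handshake packet retry")
		default:
			fmt.Println(id, "unexpected error:", err)
		}
	}
}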
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *HostInfo, packet []byte, h *header.H) bool {
if hostinfo == nil {
// Nothing here to tear down, got a bogus stage 2 packet
return true
}
hostinfo.Lock()
defer hostinfo.Unlock()
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return false
}
f.hostMap.AddRemote(ip, addr)
return false
}
func ixHandshakeStage2(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet []byte, h *Header) bool {
if hostinfo == nil {
return true
}
if bytes.Equal(hostinfo.HandshakePacket[2], packet[HeaderLen:]) {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Error("Already seen this handshake packet")
return false
}
ci := hostinfo.ConnectionState
// Mark packet 2 as seen so it doesn't show up as missed
ci.window.Update(2)
if ci.ready {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Info("Handshake is already complete")
hostinfo.HandshakePacket[2] = make([]byte, len(packet[HeaderLen:]))
copy(hostinfo.HandshakePacket[2], packet[HeaderLen:])
// Update remote if preferred
if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[HeaderLen:])
// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
return false
}
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Error("Failed to call noise.ReadMessage")
@@ -379,118 +289,89 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// to DOS us. Every other error condition after should to allow a possible good handshake to complete in the
// near future
return false
} else if dKey == nil || eKey == nil {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Noise did not arrive at a key")
// This should be impossible in IX but just in case, if we get here then there is no chance to recover
// the handshake state machine. Tear it down
return true
}
hs := &NebulaHandshake{}
err = hs.Unmarshal(msg)
err = proto.Unmarshal(msg, hs)
if err != nil || hs.Details == nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true
}
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert)
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Invalid certificate from host")
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true
}
vpnIP := ip2int(remoteCert.Details.Ips[0].IP)
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
certName := remoteCert.Details.Name
fingerprint, _ := remoteCert.Sha256Sum()
issuer := remoteCert.Details.Issuer
// Ensure the right host responded
if vpnIp != hostinfo.vpnIp {
f.l.WithField("intendedVpnIp", hostinfo.vpnIp).WithField("haveVpnIp", vpnIp).
WithField("udpAddr", addr).WithField("certName", certName).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Info("Incorrect host responded to handshake")
// Release our old handshake from pending, it should not continue
f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
// Create a new hostinfo/handshake for the intended vpn ip
//TODO: this adds it to the timer wheel in a way that aggressively retries
newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
newHostInfo.Lock()
// Block the current used address
newHostInfo.remotes = hostinfo.remotes
newHostInfo.remotes.BlockRemote(addr)
// Get the correct remote list for the host we did handshake with
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
Info("Blocked addresses for handshakes")
// Swap the packet store to benefit the original intended recipient
hostinfo.ConnectionState.queueLock.Lock()
newHostInfo.packetStore = hostinfo.packetStore
hostinfo.packetStore = []*cachedPacket{}
hostinfo.ConnectionState.queueLock.Unlock()
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
newHostInfo.Unlock()
return true
}
// Mark packet 2 as seen so it doesn't show up as missed
ci.window.Update(f.l, 2)
duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("durationNs", duration).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message received")
//ci.remoteIndex = hs.ResponderIndex
hostinfo.remoteIndexId = hs.Details.ResponderIndex
hostinfo.lastHandshakeTime = hs.Details.Time
// Store their cert and our symmetric keys
hs.Details.Cert = ci.certState.rawCertificateNoKey
/*
hsBytes, err := proto.Marshal(hs)
if err != nil {
l.Debugln("Failed to marshal handshake: ", err)
return
}
*/
// Regardless of whether you are the sender or receiver, you should arrive here
// and complete standing up the connection.
if dKey != nil && eKey != nil {
ip := ip2int(remoteCert.Details.Ips[0].IP)
ci.peerCert = remoteCert
ci.dKey = NewNebulaCipherState(dKey)
ci.eKey = NewNebulaCipherState(eKey)
// Make sure the current udpAddr being used is set for responding
if addr != nil {
hostinfo.SetRemote(addr)
} else {
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
}
// Build up the radix for the firewall if we have subnets in the cert
hostinfo.CreateRemoteCIDR(remoteCert)
//l.Debugln("got symmetric pairs")
//hostinfo.ClearRemotes()
f.hostMap.AddRemote(ip, addr)
hostinfo.CreateRemoteCIDR(remoteCert)
f.lightHouse.AddRemoteAndReset(ip, addr)
if f.serveDns {
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
}
ho, err := f.hostMap.QueryVpnIP(vpnIP)
if err == nil && ho.localIndexId != 0 {
l.WithField("vpnIp", vpnIP).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("action", "removing stale index").
WithField("index", ho.localIndexId).
Debug("Handshake processing")
f.hostMap.DeleteIndex(ho.localIndexId)
}
f.hostMap.AddVpnIPHostInfo(vpnIP, hostinfo)
f.hostMap.AddIndexHostInfo(hostinfo.localIndexId, hostinfo)
hostinfo.handshakeComplete()
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
f.metricHandshakes.Update(duration)
} else {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Noise did not arrive at a key")
return true
}
return false
}
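Editor's note: both versions of ixHandshakeStage2 above cache the stage 2 payload and use bytes.Equal to detect a retransmitted handshake packet. A small, self-contained sketch of that idea follows; the cache and names here are made up for illustration and are not nebula's API.

package main

import (
	"bytes"
	"fmt"
)

// seenStage2 caches the last stage 2 payload per peer so retransmits can be ignored.
var seenStage2 = map[string][]byte{}

// isDuplicate reports whether this exact stage 2 payload was already processed for the peer.
func isDuplicate(peer string, payload []byte) bool {
	if prev, ok := seenStage2[peer]; ok && bytes.Equal(prev, payload) {
		return true
	}
	// Store a copy, since the caller may reuse the packet buffer.
	seenStage2[peer] = append([]byte(nil), payload...)
	return false
}

func main() {
	pkt := []byte{0x01, 0x02, 0x03}
	fmt.Println(isDuplicate("10.1.1.2", pkt)) // false, first time seen
	fmt.Println(isDuplicate("10.1.1.2", pkt)) // true, retransmit
}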
@@ -1,42 +1,39 @@
package nebula
import (
"bytes"
"context"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"net"
"time"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
)
const (
// Total time to try a handshake = sequence of HandshakeTryInterval * HandshakeRetries
// With 100ms interval and 20 retries is 23.5 seconds
DefaultHandshakeTryInterval = time.Millisecond * 100
DefaultHandshakeRetries = 10
DefaultHandshakeRetries = 20
// DefaultHandshakeWaitRotation is the number of handshake attempts to do before starting to use other ips addresses
DefaultHandshakeWaitRotation = 5
DefaultHandshakeTriggerBuffer = 64
DefaultUseRelays = true
)
var (
defaultHandshakeConfig = HandshakeConfig{
tryInterval: DefaultHandshakeTryInterval,
retries: DefaultHandshakeRetries,
waitRotation: DefaultHandshakeWaitRotation,
triggerBuffer: DefaultHandshakeTriggerBuffer,
useRelays: DefaultUseRelays,
}
)
type HandshakeConfig struct {
tryInterval time.Duration
retries int
waitRotation int
triggerBuffer int
useRelays bool
messageMetrics *MessageMetrics
}
@@ -45,388 +42,190 @@ type HandshakeManager struct {
pendingHostMap *HostMap
mainHostMap *HostMap
lightHouse *LightHouse
outside *udp.Conn
outside *udpConn
config HandshakeConfig
OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
messageMetrics *MessageMetrics
metricInitiated metrics.Counter
metricTimedOut metrics.Counter
l *logrus.Logger
// can be used to trigger outbound handshake for the given vpnIp
trigger chan iputil.VpnIp
trigger chan uint32
OutboundHandshakeTimer *SystemTimerWheel
InboundHandshakeTimer *SystemTimerWheel
messageMetrics *MessageMetrics
}
func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
func NewHandshakeManager(tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udpConn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{
pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges),
pendingHostMap: NewHostMap("pending", tunCidr, preferredRanges),
mainHostMap: mainHostMap,
lightHouse: lightHouse,
outside: outside,
config: config,
trigger: make(chan iputil.VpnIp, config.triggerBuffer),
OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
trigger: make(chan uint32, config.triggerBuffer),
OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, config.tryInterval*time.Duration(config.retries)),
InboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, config.tryInterval*time.Duration(config.retries)),
messageMetrics: config.messageMetrics,
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
l: l,
}
}
func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
func (c *HandshakeManager) Run(f EncWriter) {
clockSource := time.NewTicker(c.config.tryInterval)
clockSource := time.Tick(c.config.tryInterval)
defer clockSource.Stop()
for {
select {
case <-ctx.Done():
return
case vpnIP := <-c.trigger:
l.WithField("vpnIp", IntIp(vpnIP)).Debug("HandshakeManager: triggered")
c.handleOutbound(vpnIP, f, true)
case now := <-clockSource.C:
case now := <-clockSource:
c.NextOutboundHandshakeTimerTick(now, f)
c.NextInboundHandshakeTimerTick(now)
}
}
}
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
c.OutboundHandshakeTimer.Advance(now)
c.OutboundHandshakeTimer.advance(now)
for {
vpnIp, has := c.OutboundHandshakeTimer.Purge()
ep := c.OutboundHandshakeTimer.Purge()
if !has {
if ep == nil {
break
}
c.handleOutbound(vpnIp, f, false)
vpnIP := ep.(uint32)
c.handleOutbound(vpnIP, f, false)
}
}
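Editor's note: NextOutboundHandshakeTimerTick above advances the wheel to "now" and then purges every entry that has come due, retrying the handshake for each. The rough stand-in below uses a plain map keyed by deadline to illustrate the advance-then-purge loop; it is not nebula's SystemTimerWheel or LockingTimerWheel implementation.

package main

import (
	"fmt"
	"time"
)

// toyWheel is a simplistic stand-in for a timer wheel: an entry becomes due once now passes its deadline.
type toyWheel struct {
	entries map[string]time.Time
}

// purgeDue removes and returns every entry whose deadline is at or before now.
func (w *toyWheel) purgeDue(now time.Time) []string {
	var due []string
	for id, deadline := range w.entries {
		if !deadline.After(now) {
			due = append(due, id)
			delete(w.entries, id)
		}
	}
	return due
}

func main() {
	now := time.Now()
	w := &toyWheel{entries: map[string]time.Time{
		"10.1.1.2": now.Add(100 * time.Millisecond),
		"10.1.1.3": now.Add(500 * time.Millisecond),
	}}
	// Advancing 200ms makes only the first entry due.
	for _, vpnIP := range w.purgeDue(now.Add(200 * time.Millisecond)) {
		fmt.Println("would retry handshake for", vpnIP)
	}
}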
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) {
func (c *HandshakeManager) handleOutbound(vpnIP uint32, f EncWriter, lighthouseTriggered bool) {
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
index, err := c.pendingHostMap.GetIndexByVpnIP(vpnIP)
if err != nil {
return
}
hostinfo.Lock()
defer hostinfo.Unlock()
hostinfo, err := c.pendingHostMap.QueryVpnIP(vpnIP)
if err != nil {
// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
if hostinfo.HandshakeComplete {
// Ensure we don't exist in the pending hostmap anymore since we have completed
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
}
// Check if we have a handshake packet to transmit yet
if !hostinfo.HandshakeReady {
// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
// If we haven't finished the handshake and we haven't hit max retries, query
// lighthouse and then send the handshake packet again.
if hostinfo.HandshakeCounter < c.config.retries && !hostinfo.HandshakeComplete {
if hostinfo.remote == nil {
// We continue to query the lighthouse because hosts may
// come online during handshake retries. If the query
// succeeds (no error), add the lighthouse info to hostinfo
ips := c.lightHouse.QueryCache(vpnIP)
// If we have no responses yet, or only one IP (the host hadn't
// finished reporting its own IPs yet), then send another query to
// the LH.
if len(ips) <= 1 {
ips, err = c.lightHouse.Query(vpnIP, f)
}
if err == nil {
for _, ip := range ips {
hostinfo.AddRemote(ip)
}
hostinfo.ForcePromoteBest(c.mainHostMap.preferredRanges)
}
} else if lighthouseTriggered {
// We were triggered by a lighthouse HostQueryReply packet, but
// we have already picked a remote for this host (this can happen
// if we are configured with multiple lighthouses). So we can skip
// this trigger and let the timerwheel handle the rest of the
// process
return
}
// If we are out of time, clean up
if hostinfo.HandshakeCounter >= c.config.retries {
hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
hostinfo.HandshakeCounter++
// We want to use the "best" calculated ip for the first 5 attempts, after that we just blindly rotate through
// all the others until we can stand up a connection.
if hostinfo.HandshakeCounter > c.config.waitRotation {
hostinfo.rotateRemote()
}
// Ensure the handshake is ready to avoid a race in timer tick and stage 0 handshake generation
if hostinfo.HandshakeReady && hostinfo.remote != nil {
c.messageMetrics.Tx(handshake, NebulaMessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err := c.outside.WriteTo(hostinfo.HandshakePacket[0], hostinfo.remote)
if err != nil {
hostinfo.logger().WithField("udpAddr", hostinfo.remote).
WithField("initiatorIndex", hostinfo.localIndexId). WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId). WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
Info("Handshake timed out")
c.metricTimedOut.Inc(1)
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
}
// Get a remotes object if we don't already have one.
// This is mainly to protect us as this should never be the case
// NB ^ This comment doesn't jive. It's how the thing gets initialized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil {
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
}
remotes := hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hostinfo.HandshakeLastRemotes)
// We only care about a lighthouse trigger if we have new remotes to send to.
// This is a very specific optimization for a fast lighthouse reply.
if lighthouseTriggered && !remotesHaveChanged {
// If we didn't return here a lighthouse could cause us to aggressively send handshakes
return
}
hostinfo.HandshakeLastRemotes = remotes
// TODO: this will generate a load of queries for hosts with only 1 ip
// (such as ones registered to the lighthouse with only a private IP)
// So we only do it one time after attempting 5 handshakes already.
if len(remotes) <= 1 && hostinfo.HandshakeCounter == 5 {
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
// the learned public ip for them. Query again to short circuit the promotion counter
c.lightHouse.QueryServer(vpnIp, f)
}
// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
var sentTo []*udp.Addr
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
if err != nil {
hostinfo.logger(c.l).WithField("udpAddr", addr).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake message") WithError(err).Error("Failed to send handshake message")
} else { } else {
sentTo = append(sentTo, addr) //TODO: this log line is assuming a lot of stuff around the cached stage 0 handshake packet, we should
} // keep the real packet struct around for logging purposes
}) hostinfo.logger().WithField("udpAddr", hostinfo.remote).
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
// so only log when the list of remotes has changed
if remotesHaveChanged {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId). WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent") Info("Handshake message sent")
} else if c.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Debug("Handshake message sent")
}
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(c.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays {
// Don't relay to myself, and don't relay through the host I'm trying to connect to
if *relay == vpnIp || *relay == c.lightHouse.myVpnIp {
continue
}
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
if err != nil || relayHostInfo.remote == nil {
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
f.Handshake(*relay)
continue
}
// Check the relay HostInfo to see if we already established a relay through it
if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
switch existingRelay.State {
case Established:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
case Requested:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
// This must send over the hostinfo, not over hm.Hosts[ip]
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": existingRelay.LocalIndex,
"relay": *relay}).
Info("send CreateRelayRequest")
}
default:
hostinfo.logger(c.l).
WithField("vpnIp", vpnIp).
WithField("state", existingRelay.State).
WithField("relay", relayHostInfo.vpnIp).
Errorf("Relay unexpected state")
}
} else {
// No relays exist or requested yet.
if relayHostInfo.remote != nil {
idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested)
if err != nil {
hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
}
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: idx,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": idx,
"relay": *relay}).
Info("send CreateRelayRequest")
}
}
}
} }
} }
// Increment the counter to increase our delay, linear backoff
// Readd to the timer wheel so we continue trying wait HandshakeTryInterval * counter longer for next try
hostinfo.HandshakeCounter++
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
if !lighthouseTriggered {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
//l.Infoln("Interval: ", HandshakeTryInterval*time.Duration(hostinfo.HandshakeCounter))
c.OutboundHandshakeTimer.Add(vpnIP, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
}
} else {
c.pendingHostMap.DeleteVpnIP(vpnIP)
c.pendingHostMap.DeleteIndex(index)
}
}
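Editor's note: handleOutbound above re-adds the host to the timer wheel with a delay of tryInterval * HandshakeCounter, which gives a linear backoff between attempts. The short sketch below just adds up the total wait this produces, assuming the 100ms interval and 10 retries shown on one side of the diff; treat the numbers as an illustration, not a spec.

package main

import (
	"fmt"
	"time"
)

func main() {
	tryInterval := 100 * time.Millisecond
	retries := 10

	// Attempt i is scheduled tryInterval*i after the previous one (linear backoff),
	// so the total time spent retrying is the sum of an arithmetic series.
	total := time.Duration(0)
	for i := 1; i <= retries; i++ {
		total += tryInterval * time.Duration(i)
	}
	fmt.Println(total) // 5.5s for 100ms * (1+2+...+10)
}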
func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo {
hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
if created {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
c.metricInitiated.Inc(1)
}
func (c *HandshakeManager) NextInboundHandshakeTimerTick(now time.Time) {
c.InboundHandshakeTimer.advance(now)
for {
ep := c.InboundHandshakeTimer.Purge()
if ep == nil {
break
}
index := ep.(uint32)
vpnIP, err := c.pendingHostMap.GetVpnIPByIndex(index)
if err != nil {
continue
}
c.pendingHostMap.DeleteIndex(index)
c.pendingHostMap.DeleteVpnIP(vpnIP)
}
}
func (c *HandshakeManager) AddVpnIP(vpnIP uint32) *HostInfo {
hostinfo := c.pendingHostMap.AddVpnIP(vpnIP)
// We lock here and use an array to insert items to prevent locking the
// main receive thread for very long by waiting to add items to the pending map
c.OutboundHandshakeTimer.Add(vpnIP, c.config.tryInterval)
return hostinfo
}
func (c *HandshakeManager) DeleteVpnIP(vpnIP uint32) {
//l.Debugln("Deleting pending vpn ip :", IntIp(vpnIP))
c.pendingHostMap.DeleteVpnIP(vpnIP)
var (
ErrExistingHostInfo = errors.New("existing hostinfo")
ErrAlreadySeen = errors.New("already seen")
ErrLocalIndexCollision = errors.New("local index collision")
)
// CheckAndComplete checks for any conflicts in the main and pending hostmap
// before adding hostinfo to main. If err is nil, it was added. Otherwise err will be:
//
// ErrAlreadySeen if we already have an entry in the hostmap that has seen the
// exact same handshake packet
//
// ErrExistingHostInfo if we already have an entry in the hostmap for this
// VpnIp and the new handshake was older than the one we currently have
//
// ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
// Check if we already have a tunnel with this vpn ip
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
if found && existingHostInfo != nil {
testHostInfo := existingHostInfo
for testHostInfo != nil {
// Is it just a delayed handshake packet?
if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], testHostInfo.HandshakePacket[handshakePacket]) {
return testHostInfo, ErrAlreadySeen
}
testHostInfo = testHostInfo.next
}
// Is this a newer handshake?
if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime && !existingHostInfo.ConnectionState.initiator {
return existingHostInfo, ErrExistingHostInfo
}
existingHostInfo.logger(c.l).Info("Taking new handshake")
}
existingIndex, found := c.mainHostMap.Indexes[hostinfo.localIndexId]
if found {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}
existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
if found && existingIndex != hostinfo {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil && existingRemoteIndex.vpnIp != hostinfo.vpnIp {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
return existingHostInfo, nil
} }
// Complete is a simpler version of CheckAndComplete when we already know we func (c *HandshakeManager) AddIndex(index uint32, ci *ConnectionState) (*HostInfo, error) {
// won't have a localIndexId collision because we already have an entry in the hostinfo, err := c.pendingHostMap.AddIndex(index, ci)
// pendingHostMap. An existing hostinfo is returned if there was one.
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}
// We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap.
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
}
// AddIndexHostInfo generates a unique localIndexId for this HostInfo
// and adds it to the pendingHostMap. Will error if we are unable to generate
// a unique localIndexId
func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.RLock()
defer c.mainHostMap.RUnlock()
for i := 0; i < 32; i++ {
index, err := generateIndex(c.l)
if err != nil {
return err
return nil, fmt.Errorf("Issue adding index: %d", index)
}
//c.mainHostMap.AddIndexHostInfo(index, hostinfo)
c.InboundHandshakeTimer.Add(index, time.Second*10)
return hostinfo, nil
_, inPending := c.pendingHostMap.Indexes[index]
_, inMain := c.mainHostMap.Indexes[index]
if !inMain && !inPending {
h.localIndexId = index
c.pendingHostMap.Indexes[index] = h
return nil
}
}
return errors.New("failed to generate unique localIndexId")
} }
func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.addRemoteIndexHostInfo(index, h)
}
func (c *HandshakeManager) AddIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.AddIndexHostInfo(index, h)
}
func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
//l.Debugln("Deleting pending hostinfo :", hostinfo)
c.pendingHostMap.DeleteHostInfo(hostinfo)
}
func (c *HandshakeManager) DeleteIndex(index uint32) {
//l.Debugln("Deleting pending index :", index)
c.pendingHostMap.DeleteIndex(index)
}
func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
@@ -440,28 +239,18 @@ func (c *HandshakeManager) EmitStats() {
// Utility functions below
func generateIndex(l *logrus.Logger) (uint32, error) {
func generateIndex() (uint32, error) {
b := make([]byte, 4)
// Let zero mean we don't know the ID, so don't generate zero
var index uint32
for index == 0 {
_, err := rand.Read(b)
if err != nil {
l.Errorln(err)
return 0, err
}
index = binary.BigEndian.Uint32(b)
index := binary.BigEndian.Uint32(b)
}
if l.Level >= logrus.DebugLevel {
l.WithField("index", index).
Debug("Generated index")
}
return index, nil
}
func hsTimeout(tries int, interval time.Duration) time.Duration {
return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
}
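Editor's note: hsTimeout above computes the same arithmetic-series total as the linear backoff in closed form: tries/2 * ((2 * interval) + (tries-1) * interval). The sketch below is just a worked check of that expression with Go's integer semantics; the function name is copied for illustration only.

package main

import (
	"fmt"
	"time"
)

// Same closed form as hsTimeout above: tries/2 * ((2 * interval) + (tries-1) * interval).
func hsTimeoutSketch(tries int, interval time.Duration) time.Duration {
	return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
}

func main() {
	// With 10 tries at 100ms: 5 * (200ms + 900ms) = 5.5s, matching the linear backoff sum.
	fmt.Println(hsTimeoutSketch(10, 100*time.Millisecond))
	// Note tries/2 uses integer division, so odd values of tries round the total down slightly.
	fmt.Println(hsTimeoutSketch(5, 100*time.Millisecond)) // 2 * 600ms = 1.2s (the exact sum would be 1.5s)
}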
@@ -5,69 +5,145 @@ import (
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func Test_NewHandshakeManagerVpnIp(t *testing.T) { var indexes []uint32 = []uint32{1000, 2000, 3000, 4000}
l := test.NewLogger()
//var ips []uint32 = []uint32{9000, 9999999, 3, 292394923}
var ips []uint32
func Test_NewHandshakeManagerIndex(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
ips = []uint32{ip2int(net.ParseIP("172.1.1.2"))}
preferredRanges := []*net.IPNet{localrange}
mainHM := NewHostMap("test", vpncidr, preferredRanges)
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
now := time.Now()
blah.NextInboundHandshakeTimerTick(now)
// Add four indexes
for _, v := range indexes {
blah.AddIndex(v, &ConnectionState{})
}
// Confirm they are in the pending index list
for _, v := range indexes {
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(v))
}
// Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Indexes, 0)
// Jump ahead 8 seconds
for i := 1; i <= DefaultHandshakeRetries; i++ {
next_tick := now.Add(DefaultHandshakeTryInterval * time.Duration(i))
blah.NextInboundHandshakeTimerTick(next_tick)
}
// Confirm they are still in the pending index list
for _, v := range indexes {
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(v))
}
// Jump ahead 4 more seconds
next_tick := now.Add(12 * time.Second)
blah.NextInboundHandshakeTimerTick(next_tick)
// Confirm they have been removed
for _, v := range indexes {
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(v))
}
}
func Test_NewHandshakeManagerVpnIP(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ips = []uint32{ip2int(net.ParseIP("172.1.1.2"))}
preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{}
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
mainHM := NewHostMap("test", vpncidr, preferredRanges)
lh := newTestLighthouse()
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw)
// Add four "IPs" - which are just uint32s
for _, v := range ips {
blah.AddVpnIP(v)
}
var initCalled bool
initFunc := func(*HostInfo) {
initCalled = true
}
i := blah.AddVpnIp(ip, initFunc)
assert.True(t, initCalled)
initCalled = false
i2 := blah.AddVpnIp(ip, initFunc)
assert.False(t, initCalled)
assert.Same(t, i, i2)
i.remotes = NewRemoteList(nil)
i.HandshakeReady = true
// Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Hosts, 0)
// Confirm they are in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
for _, v := range ips {
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(v))
}
// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
for i := 1; i <= DefaultHandshakeRetries+1; i++ {
now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
blah.NextOutboundHandshakeTimerTick(now, mw)
// Jump ahead `HandshakeRetries` ticks
cumulative := time.Duration(0)
for i := 0; i <= DefaultHandshakeRetries+1; i++ {
cumulative += time.Duration(i)*DefaultHandshakeTryInterval + 1
next_tick := now.Add(cumulative)
//l.Infoln(next_tick)
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
}
// Confirm they are still in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
// Tick 1 more time, a minute will certainly flush it out
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
for _, v := range ips {
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(v))
}
// Jump ahead 1 more second
cumulative += time.Duration(DefaultHandshakeRetries+1) * DefaultHandshakeTryInterval
next_tick := now.Add(cumulative)
//l.Infoln(next_tick)
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
// Confirm they have been removed
assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
}
func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
for _, i := range tw.t.wheel {
for _, v := range ips {
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(v))
}
}
func Test_NewHandshakeManagerTrigger(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ip := ip2int(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{}
mainHM := NewHostMap("test", vpncidr, preferredRanges)
lh := &LightHouse{}
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, lh, &udpConn{}, defaultHandshakeConfig)
now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw)
assert.Equal(t, 0, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
blah.AddVpnIP(ip)
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
// Trigger the same method the channel will
blah.handleOutbound(ip, mw, true)
// Make sure the trigger doesn't schedule another timer entry
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
hi := blah.pendingHostMap.Hosts[ip]
assert.Nil(t, hi.remote)
lh.addrMap = map[uint32][]udpAddr{
ip: {*NewUDPAddrFromString("10.1.1.1:4242")},
}
// This should trigger the hostmap to populate the hostinfo
blah.handleOutbound(ip, mw, true)
assert.NotNil(t, hi.remote)
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
}
func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) {
for _, i := range tw.wheel {
n := i.Head
for n != nil {
c++
@@ -77,19 +153,92 @@ func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
return c
}
func Test_NewHandshakeManagerVpnIPcleanup(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
vpnIP = ip2int(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{}
mainHM := NewHostMap("test", vpncidr, preferredRanges)
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw)
hostinfo := blah.AddVpnIP(vpnIP)
// Pretend we have an index too
blah.AddIndexHostInfo(12341234, hostinfo)
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(12341234))
// Jump ahead `HandshakeRetries` ticks. Eviction should happen in pending
// but not main hostmap
cumulative := time.Duration(0)
for i := 1; i <= DefaultHandshakeRetries+2; i++ {
cumulative += DefaultHandshakeTryInterval * time.Duration(i)
next_tick := now.Add(cumulative)
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
}
/*
for i := 0; i <= HandshakeRetries+1; i++ {
next_tick := now.Add(cumulative)
//l.Infoln(next_tick)
blah.NextOutboundHandshakeTimerTick(next_tick)
}
*/
/*
for i := 0; i <= HandshakeRetries+1; i++ {
next_tick := now.Add(time.Duration(i) * time.Second)
blah.NextOutboundHandshakeTimerTick(next_tick)
}
*/
/*
cumulative += HandshakeTryInterval*time.Duration(HandshakeRetries) + 3
next_tick := now.Add(cumulative)
l.Infoln(cumulative, next_tick)
blah.NextOutboundHandshakeTimerTick(next_tick)
*/
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(vpnIP))
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(12341234))
}
func Test_NewHandshakeManagerIndexcleanup(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange}
mainHM := NewHostMap("test", vpncidr, preferredRanges)
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}, defaultHandshakeConfig)
now := time.Now()
blah.NextInboundHandshakeTimerTick(now)
hostinfo, _ := blah.AddIndex(12341234, &ConnectionState{})
// Pretend we have an index too
blah.pendingHostMap.AddVpnIPHostInfo(101010, hostinfo)
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(101010))
for i := 1; i <= DefaultHandshakeRetries+2; i++ {
next_tick := now.Add(DefaultHandshakeTryInterval * time.Duration(i))
blah.NextInboundHandshakeTimerTick(next_tick)
}
next_tick := now.Add(DefaultHandshakeTryInterval*DefaultHandshakeRetries + 3)
blah.NextInboundHandshakeTimerTick(next_tick)
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(101010))
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(12341234))
}
type mockEncWriter struct {
}
func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
func (mw *mockEncWriter) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) {
return
}
func (mw *mockEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
func (mw *mockEncWriter) SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) {
return
}
func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
return
}
func (mw *mockEncWriter) Handshake(vpnIP iputil.VpnIp) {}
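Editor's note: mockEncWriter above is a no-op stand-in so the handshake manager tests can run without a real interface or UDP stack. The general pattern, as a tiny self-contained sketch (the EncSender interface and names here are invented for the example, not nebula's EncWriter):

package main

import "fmt"

// EncSender is a made-up interface standing in for the writer the code under test depends on.
type EncSender interface {
	SendMessage(dst string, payload []byte)
}

// mockSender satisfies EncSender but records calls instead of touching the network.
type mockSender struct {
	sent int
}

func (m *mockSender) SendMessage(dst string, payload []byte) {
	m.sent++
}

// retryHandshakes is a stand-in for code under test that only needs the interface.
func retryHandshakes(s EncSender, peers []string) {
	for _, p := range peers {
		s.SendMessage(p, []byte("handshake"))
	}
}

func main() {
	m := &mockSender{}
	retryHandshakes(m, []string{"10.1.1.2", "10.1.1.3"})
	fmt.Println("messages sent:", m.sent) // 2
}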
Some files were not shown because too many files have changed in this diff