Mirror of https://github.com/slackhq/nebula.git, synced 2025-11-10 11:23:57 +01:00

Compare commits (10 commits):

- 105e0ec66c
- 4870bb680d
- a1498ca8f8
- 9877648da9
- 8e0a7bcbb7
- 8c29b15c6d
- 04d7a8ccba
- b55b9019a7
- 2e85d138cd
- 9bfdfbafc1
.github/ISSUE_TEMPLATE/config.yml (22 lines changed)

@@ -1,21 +1,13 @@
 blank_issues_enabled: true
 contact_links:
-  - name: 💨 Performance Issues
-    url: https://github.com/slackhq/nebula/discussions/new/choose
-    about: 'We ask that you create a discussion instead of an issue for performance-related questions. This allows us to have a more open conversation about the issue and helps us to better understand the problem.'
-
-  - name: 📄 Documentation Issues
-    url: https://github.com/definednet/nebula-docs
-    about: "If you've found an issue with the website documentation, please file it in the nebula-docs repository."
-
-  - name: 📱 Mobile Nebula Issues
-    url: https://github.com/definednet/mobile_nebula
-    about: "If you're using the mobile Nebula app and have found an issue, please file it in the mobile_nebula repository."
-
   - name: 📘 Documentation
     url: https://nebula.defined.net/docs/
-    about: 'The documentation is the best place to start if you are new to Nebula.'
+    about: Review documentation.

   - name: 💁 Support/Chat
-    url: https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA
-    about: 'For faster support, join us on Slack for assistance!'
+    url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
+    about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'
+
+  - name: 📱 Mobile Nebula
+    url: https://github.com/definednet/mobile_nebula
+    about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'
.github/pull_request_template.md (11 lines changed)

@@ -1,11 +0,0 @@
-<!--
-Thank you for taking the time to submit a pull request!
-
-Please be sure to provide a clear description of what you're trying to achieve with the change.
-
-- If you're submitting a new feature, please explain how to use it and document any new config options in the example config.
-- If you're submitting a bugfix, please link the related issue or describe the circumstances surrounding the issue.
-- If you're changing a default, explain why you believe the new default is appropriate for most users.
-
-P.S. If you're only updating the README or other docs, please file a pull request here instead: https://github.com/DefinedNet/nebula-docs
--->
.github/workflows/gofmt.yml (2 lines changed)

@@ -18,7 +18,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Install goimports
.github/workflows/release.yml (8 lines changed)

@@ -14,7 +14,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Build
@@ -37,7 +37,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Build
@@ -70,12 +70,12 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Import certificates
         if: env.HAS_SIGNING_CREDS == 'true'
-        uses: Apple-Actions/import-codesign-certs@v5
+        uses: Apple-Actions/import-codesign-certs@v3
         with:
           p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
           p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
.github/workflows/smoke-extra.yml (3 lines changed)

@@ -27,9 +27,6 @@ jobs:
           go-version-file: 'go.mod'
           check-latest: true

-      - name: add hashicorp source
-        run: wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg && echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
-
       - name: install vagrant
         run: sudo apt-get update && sudo apt-get install -y vagrant virtualbox

.github/workflows/smoke.yml (2 lines changed)

@@ -22,7 +22,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: build
.github/workflows/smoke/build.sh (10 lines changed)

@@ -5,10 +5,6 @@ set -e -x
 rm -rf ./build
 mkdir ./build

-# TODO: Assumes your docker bridge network is a /24, and the first container that launches will be .1
-# - We could make this better by launching the lighthouse first and then fetching what IP it is.
-NET="$(docker network inspect bridge -f '{{ range .IPAM.Config }}{{ .Subnet }}{{ end }}' | cut -d. -f1-3)"
-
 (
     cd build

@@ -25,16 +21,16 @@ NET="$(docker network inspect bridge -f '{{ range .IPAM.Config }}{{ .Subnet }}{{
     ../genconfig.sh >lighthouse1.yml

     HOST="host2" \
-        LIGHTHOUSES="192.168.100.1 $NET.2:4242" \
+        LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
         ../genconfig.sh >host2.yml

     HOST="host3" \
-        LIGHTHOUSES="192.168.100.1 $NET.2:4242" \
+        LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
         INBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
         ../genconfig.sh >host3.yml

     HOST="host4" \
-        LIGHTHOUSES="192.168.100.1 $NET.2:4242" \
+        LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
         OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
         ../genconfig.sh >host4.yml

.github/workflows/smoke/smoke-vagrant.sh (34 lines changed)

@@ -29,13 +29,13 @@ docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
 docker run --name host2 --rm "$CONTAINER" -config host2.yml -test

 vagrant up
-vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test" -- -T
+vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test"

 docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
 docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" 2>&1 -- -T | tee logs/host3 | sed -u 's/^/ [host3] /' &
+vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" &
 sleep 15

 # grab tcpdump pcaps for debugging
@@ -46,8 +46,8 @@ docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host
 # vagrant ssh -c "tcpdump -i nebula1 -q -w - -U" 2>logs/host3.inside.log >logs/host3.inside.pcap &
 # vagrant ssh -c "tcpdump -i eth0 -q -w - -U" 2>logs/host3.outside.log >logs/host3.outside.pcap &

-#docker exec host2 ncat -nklv 0.0.0.0 2000 &
-#vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
+docker exec host2 ncat -nklv 0.0.0.0 2000 &
+vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
 #docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
 #vagrant ssh -c "ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000" &

@@ -68,11 +68,11 @@ docker exec host2 ping -c1 192.168.100.1
 # Should fail because not allowed by host3 inbound firewall
 ! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1

-#set +x
-#echo
-#echo " *** Testing ncat from host2"
-#echo
-#set -x
+set +x
+echo
+echo " *** Testing ncat from host2"
+echo
+set -x
 # Should fail because not allowed by host3 inbound firewall
 #! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
 #! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
@@ -82,18 +82,18 @@ echo
 echo " *** Testing ping from host3"
 echo
 set -x
-vagrant ssh -c "ping -c1 192.168.100.1" -- -T
-vagrant ssh -c "ping -c1 192.168.100.2" -- -T
+vagrant ssh -c "ping -c1 192.168.100.1"
+vagrant ssh -c "ping -c1 192.168.100.2"

-#set +x
-#echo
-#echo " *** Testing ncat from host3"
-#echo
-#set -x
+set +x
+echo
+echo " *** Testing ncat from host3"
+echo
+set -x
 #vagrant ssh -c "ncat -nzv -w5 192.168.100.2 2000"
 #vagrant ssh -c "ncat -nzuv -w5 192.168.100.2 3000" | grep -q host2

-vagrant ssh -c "sudo xargs kill </nebula/pid" -- -T
+vagrant ssh -c "sudo xargs kill </nebula/pid"
 docker exec host2 sh -c 'kill 1'
 docker exec lighthouse1 sh -c 'kill 1'
 sleep 1
.github/workflows/test.yml (36 lines changed)

@@ -22,7 +22,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Build
@@ -31,11 +31,6 @@ jobs:
       - name: Vet
         run: make vet

-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v8
-        with:
-          version: v2.1
-
       - name: Test
         run: make test

@@ -60,7 +55,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Build
@@ -70,25 +65,7 @@ jobs:
         run: make test-boringcrypto

       - name: End 2 end
-        run: make e2e GOEXPERIMENT=boringcrypto CGO_ENABLED=1 TEST_ENV="TEST_LOGS=1" TEST_FLAGS="-v -ldflags -checklinkname=0"
+        run: make e2evv GOEXPERIMENT=boringcrypto CGO_ENABLED=1

-  test-linux-pkcs11:
-    name: Build and test on linux with pkcs11
-    runs-on: ubuntu-latest
-    steps:
-
-      - uses: actions/checkout@v4
-
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.22'
-          check-latest: true
-
-      - name: Build
-        run: make bin-pkcs11
-
-      - name: Test
-        run: make test-pkcs11
-
   test:
     name: Build and test on ${{ matrix.os }}
@@ -102,7 +79,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           check-latest: true

       - name: Build nebula
@@ -114,11 +91,6 @@ jobs:
       - name: Vet
         run: make vet

-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v8
-        with:
-          version: v2.1
-
       - name: Test
         run: make test

.gitignore (4 lines changed)

@@ -5,8 +5,7 @@
 /nebula-darwin
 /nebula.exe
 /nebula-cert.exe
-**/coverage.out
-**/cover.out
+/coverage.out
 /cpu.pprof
 /build
 /*.tar.gz
@@ -14,6 +13,5 @@
 **.crt
 **.key
 **.pem
-**.pub
 !/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key
 !/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt
(file header missing; the contents are a golangci-lint v2 configuration, likely .golangci.yml, 23 lines removed)

@@ -1,23 +0,0 @@
-version: "2"
-linters:
-  default: none
-  enable:
-    - testifylint
-  exclusions:
-    generated: lax
-    presets:
-      - comments
-      - common-false-positives
-      - legacy
-      - std-error-handling
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
-formatters:
-  exclusions:
-    generated: lax
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
CHANGELOG.md (30 lines changed)

@@ -7,12 +7,28 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

-### Changed
+## [1.9.6] - 2025-7-15

-- `default_local_cidr_any` now defaults to false, meaning that any firewall rule
-  intended to target an `unsafe_routes` entry must explicitly declare it via the
-  `local_cidr` field. This is almost always the intended behavior. This flag is
-  deprecated and will be removed in a future release.
+### Added
+
+- Support dropping inactive tunnels. This is disabled by default in this release but can be enabled with `tunnels.drop_inactive`. See example config for more details. (#1413)
+
+### Fixed
+
+- Fix Darwin freeze due to presence of some Network Extensions (#1426)
+- Ensure the same relay tunnel is always used when multiple relay tunnels are present (#1422)
+- Fix Windows freeze due to ICMP error handling (#1412)
+- Fix relay migration panic (#1403)
+
+## [1.9.5] - 2024-12-05
+
+### Added
+
+- Gracefully ignore v2 certificates. (#1282)
+
+### Fixed
+
+- Fix relays that refuse to re-establish after one of the remote tunnel pairs breaks. (#1277)

 ## [1.9.4] - 2024-09-09

@@ -671,7 +687,9 @@ created.)

 - Initial public release.

-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.4...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.6...HEAD
+[1.9.6]: https://github.com/slackhq/nebula/releases/tag/v1.9.6
+[1.9.5]: https://github.com/slackhq/nebula/releases/tag/v1.9.5
 [1.9.4]: https://github.com/slackhq/nebula/releases/tag/v1.9.4
 [1.9.3]: https://github.com/slackhq/nebula/releases/tag/v1.9.3
 [1.9.2]: https://github.com/slackhq/nebula/releases/tag/v1.9.2
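The 1.9.6 entry above adds support for dropping inactive tunnels behind a `tunnels.drop_inactive` setting. The sketch below is not Nebula's implementation; the `tunnel` type, its fields, and `dropInactive` are hypothetical names, illustrating only the general idea of reaping tunnels whose last activity is older than a configured threshold.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// tunnel is a hypothetical stand-in for per-peer tunnel state; only the
// fields needed for the inactivity sweep are shown.
type tunnel struct {
	peer     string
	lastSeen time.Time
}

// dropInactive removes every tunnel that has been idle longer than maxIdle
// and returns the peers that were dropped.
func dropInactive(mu *sync.Mutex, tunnels map[string]*tunnel, maxIdle time.Duration) []string {
	mu.Lock()
	defer mu.Unlock()

	var dropped []string
	now := time.Now()
	for peer, t := range tunnels {
		if now.Sub(t.lastSeen) > maxIdle {
			delete(tunnels, peer) // tear down the tunnel state
			dropped = append(dropped, peer)
		}
	}
	return dropped
}

func main() {
	mu := &sync.Mutex{}
	tunnels := map[string]*tunnel{
		"192.168.100.2": {peer: "192.168.100.2", lastSeen: time.Now()},
		"192.168.100.3": {peer: "192.168.100.3", lastSeen: time.Now().Add(-2 * time.Hour)},
	}
	// With a one-hour threshold, only the idle tunnel is removed.
	fmt.Println(dropInactive(mu, tunnels, time.Hour)) // [192.168.100.3]
}
```

In a real daemon this sweep would run on a timer and would also send a close message to the peer rather than only deleting local state.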
Makefile (19 lines changed)

@@ -40,7 +40,7 @@ ALL_LINUX = linux-amd64 \
	linux-mips64le \
	linux-mips-softfloat \
	linux-riscv64 \
	linux-loong64

 ALL_FREEBSD = freebsd-amd64 \
	freebsd-arm64
@@ -63,7 +63,7 @@ ALL = $(ALL_LINUX) \
 e2e:
	$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e

-e2ev: TEST_FLAGS += -v
+e2ev: TEST_FLAGS = -v
 e2ev: e2e

 e2evv: TEST_ENV += TEST_LOGS=1
@@ -96,7 +96,7 @@ release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)

 release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz

-BUILD_ARGS += -trimpath
+BUILD_ARGS = -trimpath

 bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
	mv $? .
@@ -116,10 +116,6 @@ bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
 bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
	mv $? .

-bin-pkcs11: BUILD_ARGS += -tags pkcs11
-bin-pkcs11: CGO_ENABLED = 1
-bin-pkcs11: bin
-
 bin:
	go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
	go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
@@ -137,8 +133,6 @@ build/linux-mips-softfloat/%: LDFLAGS += -s -w
 # boringcrypto
 build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
 build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
-build/linux-amd64-boringcrypto/%: LDFLAGS += -checklinkname=0
-build/linux-arm64-boringcrypto/%: LDFLAGS += -checklinkname=0

 build/%/nebula: .FORCE
	GOOS=$(firstword $(subst -, , $*)) \
@@ -172,10 +166,7 @@ test:
	go test -v ./...

 test-boringcrypto:
-	GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -ldflags "-checklinkname=0" -v ./...
+	GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./...

-test-pkcs11:
-	CGO_ENABLED=1 go test -v -tags pkcs11 ./...
-
 test-cov-html:
	go test -coverprofile=coverage.out
@@ -198,7 +189,7 @@ bench-cpu-long:
	go test -bench=. -benchtime=60s -cpuprofile=cpu.pprof
	go tool pprof go-audit.test cpu.pprof

-proto: nebula.pb.go cert/cert_v1.pb.go
+proto: nebula.pb.go cert/cert.pb.go

 nebula.pb.go: nebula.proto .FORCE
	go build github.com/gogo/protobuf/protoc-gen-gogofaster
README.md (69 lines changed)

@@ -4,7 +4,7 @@ It lets you seamlessly connect computers anywhere in the world. Nebula is portab
 It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.

 Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
-and tunneling.
+and tunneling, and each of those individual pieces existed before Nebula in various forms.
 What makes Nebula different to existing offerings is that it brings all of these ideas together,
 resulting in a sum that is greater than its individual parts.

@@ -12,7 +12,7 @@ Further documentation can be found [here](https://nebula.defined.net/docs/).

 You can read more about Nebula [here](https://medium.com/p/884110a5579).

-You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA).
+You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU).

 ## Supported Platforms

@@ -28,33 +28,33 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
 #### Distribution Packages

 - [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
-  ```sh
-  sudo pacman -S nebula
+  ```
+  $ sudo pacman -S nebula
   ```

 - [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
-  ```sh
-  sudo dnf install nebula
+  ```
+  $ sudo dnf install nebula
   ```

 - [Debian Linux](https://packages.debian.org/source/stable/nebula)
-  ```sh
-  sudo apt install nebula
+  ```
+  $ sudo apt install nebula
   ```

 - [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
-  ```sh
-  sudo apk add nebula
+  ```
+  $ sudo apk add nebula
   ```

-- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/n/nebula.rb)
-  ```sh
-  brew install nebula
+- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
+  ```
+  $ brew install nebula
   ```

 - [Docker](https://hub.docker.com/r/nebulaoss/nebula)
-  ```sh
-  docker pull nebulaoss/nebula
+  ```
+  $ docker pull nebulaoss/nebula
   ```

 #### Mobile
@@ -64,10 +64,10 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for

 ## Technical Overview

-Nebula is a mutually authenticated peer-to-peer software-defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
+Nebula is a mutually authenticated peer-to-peer software defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
 Nebula uses certificates to assert a node's IP address, name, and membership within user-defined groups.
 Nebula's user-defined groups allow for provider agnostic traffic filtering between nodes.
-Discovery nodes (aka lighthouses) allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
+Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
 Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme.

 Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration.
@@ -82,34 +82,28 @@ To set up a Nebula network, you'll need:

 #### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.

-Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $6/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.
-
+Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $5/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.

 Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet.

 #### 3. A Nebula certificate authority, which will be the root of trust for a particular Nebula network.

-```sh
+```
 ./nebula-cert ca -name "Myorganization, Inc"
 ```
-
 This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption.

-**Be aware!** By default, certificate authorities have a 1-year lifetime before expiration. See [this guide](https://nebula.defined.net/docs/guides/rotating-certificate-authority/) for details on rotating a CA.
-
 #### 4. Nebula host keys and certificates generated from that certificate authority

 This assumes you have four nodes, named lighthouse1, laptop, server1, host3. You can name the nodes any way you'd like, including FQDN. You'll also need to choose IP addresses and the associated subnet. In this example, we are creating a nebula network that will use 192.168.100.x/24 as its network range. This example also demonstrates nebula groups, which can later be used to define traffic rules in a nebula network.
-```sh
+```
 ./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"
 ./nebula-cert sign -name "laptop" -ip "192.168.100.2/24" -groups "laptop,home,ssh"
 ./nebula-cert sign -name "server1" -ip "192.168.100.9/24" -groups "servers"
 ./nebula-cert sign -name "host3" -ip "192.168.100.10/24"
 ```

-By default, host certificates will expire 1 second before the CA expires. Use the `-duration` flag to specify a shorter lifetime.
-
 #### 5. Configuration files for each host

 Download a copy of the nebula [example configuration](https://github.com/slackhq/nebula/blob/master/examples/config.yml).

 * On the lighthouse node, you'll need to ensure `am_lighthouse: true` is set.
@@ -124,13 +118,10 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
 **DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**

 #### 7. Run nebula on each host
-
-```sh
+```
 ./nebula -config /path/to/config.yml
 ```

-For more detailed instructions, [find the full documentation here](https://nebula.defined.net/docs/).
-
 ## Building Nebula from source

 Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
@@ -149,10 +140,8 @@ The default curve used for cryptographic handshakes and signatures is Curve25519

 In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:

-```sh
-make bin-boringcrypto
-make release-boringcrypto
-```
+make bin-boringcrypto
+make release-boringcrypto

 This is not the recommended default deployment, but may be useful based on your compliance requirements.

@@ -160,3 +149,5 @@ This is not the recommended default deployment, but may be useful based on your

 Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.

+
+
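The Technical Overview in the README above names the default primitives: ECDH key exchange and AES-256-GCM. The sketch below is not Nebula's handshake (Nebula builds on the Noise Protocol Framework); it only demonstrates the two named primitives with Go's standard library, deriving a shared X25519 secret and using it directly as an AES-256-GCM key. A real protocol would run the secret through a key derivation function first; error handling is elided for brevity.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := ecdh.X25519()

	// Each side generates a key pair and exchanges public keys.
	alice, _ := curve.GenerateKey(rand.Reader)
	bob, _ := curve.GenerateKey(rand.Reader)

	// Both sides compute the same 32-byte shared secret.
	secretA, _ := alice.ECDH(bob.PublicKey())
	secretB, _ := bob.ECDH(alice.PublicKey())
	fmt.Println("shared secrets match:", string(secretA) == string(secretB))

	// Use the 32-byte secret as an AES-256 key for an authenticated cipher.
	block, _ := aes.NewCipher(secretA)
	gcm, _ := cipher.NewGCM(block)

	nonce := make([]byte, gcm.NonceSize())
	rand.Read(nonce)

	ciphertext := gcm.Seal(nil, nonce, []byte("hello over the overlay"), nil)
	plaintext, _ := gcm.Open(nil, nonce, ciphertext, nil)
	fmt.Println(string(plaintext))
}
```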
(file header missing; likely allow_list.go)

@@ -36,7 +36,7 @@ type AllowListNameRule struct {

 func NewLocalAllowListFromConfig(c *config.C, k string) (*LocalAllowList, error) {
 	var nameRules []AllowListNameRule
-	handleKey := func(key string, value any) (bool, error) {
+	handleKey := func(key string, value interface{}) (bool, error) {
 		if key == "interfaces" {
 			var err error
 			nameRules, err = getAllowListInterfaces(k, value)
@@ -70,7 +70,7 @@ func NewRemoteAllowListFromConfig(c *config.C, k, rangesKey string) (*RemoteAllo

 // If the handleKey func returns true, the rest of the parsing is skipped
 // for this key. This allows parsing of special values like `interfaces`.
-func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value any) (bool, error)) (*AllowList, error) {
+func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
 	r := c.Get(k)
 	if r == nil {
 		return nil, nil
@@ -81,8 +81,8 @@ func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, va

 // If the handleKey func returns true, the rest of the parsing is skipped
 // for this key. This allows parsing of special values like `interfaces`.
-func newAllowList(k string, raw any, handleKey func(key string, value any) (bool, error)) (*AllowList, error) {
-	rawMap, ok := raw.(map[string]any)
+func newAllowList(k string, raw interface{}, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
+	rawMap, ok := raw.(map[interface{}]interface{})
 	if !ok {
 		return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
 	}
@@ -100,7 +100,12 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool
 	rules4 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
 	rules6 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}

-	for rawCIDR, rawValue := range rawMap {
+	for rawKey, rawValue := range rawMap {
+		rawCIDR, ok := rawKey.(string)
+		if !ok {
+			return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
+		}
+
 		if handleKey != nil {
 			handled, err := handleKey(rawCIDR, rawValue)
 			if err != nil {
@@ -111,7 +116,7 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool
 			}
 		}

-		value, ok := config.AsBool(rawValue)
+		value, ok := rawValue.(bool)
 		if !ok {
 			return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
 		}
@@ -123,6 +128,7 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool

 		ipNet = netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits())

+		// TODO: should we error on duplicate CIDRs in the config?
 		tree.Insert(ipNet, value)

 		maskBits := ipNet.Bits()
@@ -168,18 +174,22 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool
 	return &AllowList{cidrTree: tree}, nil
 }

-func getAllowListInterfaces(k string, v any) ([]AllowListNameRule, error) {
+func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
 	var nameRules []AllowListNameRule

-	rawRules, ok := v.(map[string]any)
+	rawRules, ok := v.(map[interface{}]interface{})
 	if !ok {
 		return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
 	}

 	firstEntry := true
 	var allValues bool
-	for name, rawAllow := range rawRules {
-		allow, ok := config.AsBool(rawAllow)
+	for rawName, rawAllow := range rawRules {
+		name, ok := rawName.(string)
+		if !ok {
+			return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
+		}
+		allow, ok := rawAllow.(bool)
 		if !ok {
 			return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
 		}
@@ -215,11 +225,16 @@ func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error

 	remoteAllowRanges := new(bart.Table[*AllowList])

-	rawMap, ok := value.(map[string]any)
+	rawMap, ok := value.(map[interface{}]interface{})
 	if !ok {
 		return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
 	}
-	for rawCIDR, rawValue := range rawMap {
+	for rawKey, rawValue := range rawMap {
+		rawCIDR, ok := rawKey.(string)
+		if !ok {
+			return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
+		}
+
 		allowList, err := newAllowList(fmt.Sprintf("%s.%s", k, rawCIDR), rawValue, nil)
 		if err != nil {
 			return nil, err
@@ -236,20 +251,20 @@ func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error
 	return remoteAllowRanges, nil
 }

-func (al *AllowList) Allow(addr netip.Addr) bool {
+func (al *AllowList) Allow(ip netip.Addr) bool {
 	if al == nil {
 		return true
 	}

-	result, _ := al.cidrTree.Lookup(addr)
+	result, _ := al.cidrTree.Lookup(ip)
 	return result
 }

-func (al *LocalAllowList) Allow(udpAddr netip.Addr) bool {
+func (al *LocalAllowList) Allow(ip netip.Addr) bool {
 	if al == nil {
 		return true
 	}
-	return al.AllowList.Allow(udpAddr)
+	return al.AllowList.Allow(ip)
 }

 func (al *LocalAllowList) AllowName(name string) bool {
@@ -267,37 +282,23 @@ func (al *LocalAllowList) AllowName(name string) bool {
 	return !al.nameRules[0].Allow
 }

-func (al *RemoteAllowList) AllowUnknownVpnAddr(vpnAddr netip.Addr) bool {
+func (al *RemoteAllowList) AllowUnknownVpnIp(ip netip.Addr) bool {
 	if al == nil {
 		return true
 	}
-	return al.AllowList.Allow(vpnAddr)
+	return al.AllowList.Allow(ip)
 }

-func (al *RemoteAllowList) Allow(vpnAddr netip.Addr, udpAddr netip.Addr) bool {
-	if !al.getInsideAllowList(vpnAddr).Allow(udpAddr) {
+func (al *RemoteAllowList) Allow(vpnIp netip.Addr, ip netip.Addr) bool {
+	if !al.getInsideAllowList(vpnIp).Allow(ip) {
 		return false
 	}
-	return al.AllowList.Allow(udpAddr)
+	return al.AllowList.Allow(ip)
 }

-func (al *RemoteAllowList) AllowAll(vpnAddrs []netip.Addr, udpAddr netip.Addr) bool {
-	if !al.AllowList.Allow(udpAddr) {
-		return false
-	}
-
-	for _, vpnAddr := range vpnAddrs {
-		if !al.getInsideAllowList(vpnAddr).Allow(udpAddr) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (al *RemoteAllowList) getInsideAllowList(vpnAddr netip.Addr) *AllowList {
+func (al *RemoteAllowList) getInsideAllowList(vpnIp netip.Addr) *AllowList {
 	if al.insideAllowLists != nil {
-		inside, ok := al.insideAllowLists.Lookup(vpnAddr)
+		inside, ok := al.insideAllowLists.Lookup(vpnIp)
 		if ok {
 			return inside
 		}
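The recurring pattern in the hunks above is the removal of per-key type assertions: when a YAML decoder yields `map[string]any` (the left-hand side of this diff), map keys are already strings, whereas `map[interface{}]interface{}` (the right-hand side) forces each key through an assertion before it can be used as a CIDR string. A small self-contained sketch of both shapes, with no dependency on Nebula's `config` package:

```go
package main

import "fmt"

// parseOldStyle mirrors the map[interface{}]interface{} shape: every key must
// be asserted to string before use.
func parseOldStyle(raw interface{}) (map[string]bool, error) {
	rawMap, ok := raw.(map[interface{}]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid type: %T", raw)
	}
	out := map[string]bool{}
	for rawKey, rawValue := range rawMap {
		key, ok := rawKey.(string)
		if !ok {
			return nil, fmt.Errorf("invalid key (type %T): %v", rawKey, rawKey)
		}
		value, ok := rawValue.(bool)
		if !ok {
			return nil, fmt.Errorf("invalid value (type %T): %v", rawValue, rawValue)
		}
		out[key] = value
	}
	return out, nil
}

// parseNewStyle mirrors the map[string]any shape: keys are already strings,
// so only the values need checking.
func parseNewStyle(raw any) (map[string]bool, error) {
	rawMap, ok := raw.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("invalid type: %T", raw)
	}
	out := map[string]bool{}
	for key, rawValue := range rawMap {
		value, ok := rawValue.(bool)
		if !ok {
			return nil, fmt.Errorf("invalid value (type %T): %v", rawValue, rawValue)
		}
		out[key] = value
	}
	return out, nil
}

func main() {
	old := map[interface{}]interface{}{"192.168.0.0/16": true}
	fmt.Println(parseOldStyle(old))

	newer := map[string]any{"192.168.0.0/16": true}
	fmt.Println(parseNewStyle(newer))
}
```

As a rule of thumb, gopkg.in/yaml.v2 decodes untyped maps into the second shape while yaml.v3 and encoding/json produce the first, which is why the assertion-free form can drop the key checks.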
(file header missing; likely allow_list_test.go)

@@ -9,33 +9,32 @@ import (
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/test"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )

 func TestNewAllowListFromConfig(t *testing.T) {
 	l := test.NewLogger()
 	c := config.NewC(l)
-	c.Settings["allowlist"] = map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
 		"192.168.0.0": true,
 	}
 	r, err := newAllowListFromConfig(c, "allowlist", nil)
-	require.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
+	assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
 	assert.Nil(t, r)

-	c.Settings["allowlist"] = map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
 		"192.168.0.0/16": "abc",
 	}
 	r, err = newAllowListFromConfig(c, "allowlist", nil)
-	require.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
+	assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")

-	c.Settings["allowlist"] = map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
 		"192.168.0.0/16": true,
 		"10.0.0.0/8": false,
 	}
 	r, err = newAllowListFromConfig(c, "allowlist", nil)
-	require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
+	assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")

-	c.Settings["allowlist"] = map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
 		"0.0.0.0/0": true,
 		"10.0.0.0/8": false,
 		"10.42.42.0/24": true,
@@ -43,9 +42,9 @@ func TestNewAllowListFromConfig(t *testing.T) {
 		"fd00:fd00::/16": false,
 	}
 	r, err = newAllowListFromConfig(c, "allowlist", nil)
-	require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
+	assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")

-	c.Settings["allowlist"] = map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
 		"0.0.0.0/0": true,
 		"10.0.0.0/8": false,
 		"10.42.42.0/24": true,
@@ -55,7 +54,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
 		assert.NotNil(t, r)
 	}

-	c.Settings["allowlist"] = map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
 		"0.0.0.0/0": true,
 		"10.0.0.0/8": false,
 		"10.42.42.0/24": true,
@@ -70,25 +69,25 @@ func TestNewAllowListFromConfig(t *testing.T) {

 	// Test interface names

-	c.Settings["allowlist"] = map[string]any{
-		"interfaces": map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
+		"interfaces": map[interface{}]interface{}{
 			`docker.*`: "foo",
 		},
 	}
 	lr, err := NewLocalAllowListFromConfig(c, "allowlist")
-	require.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
+	assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")

-	c.Settings["allowlist"] = map[string]any{
-		"interfaces": map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
+		"interfaces": map[interface{}]interface{}{
 			`docker.*`: false,
 			`eth.*`: true,
 		},
 	}
 	lr, err = NewLocalAllowListFromConfig(c, "allowlist")
-	require.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
+	assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")

-	c.Settings["allowlist"] = map[string]any{
-		"interfaces": map[string]any{
+	c.Settings["allowlist"] = map[interface{}]interface{}{
+		"interfaces": map[interface{}]interface{}{
 			`docker.*`: false,
 		},
 	}
@@ -99,7 +98,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
 }

 func TestAllowList_Allow(t *testing.T) {
-	assert.True(t, ((*AllowList)(nil)).Allow(netip.MustParseAddr("1.1.1.1")))
+	assert.Equal(t, true, ((*AllowList)(nil)).Allow(netip.MustParseAddr("1.1.1.1")))

 	tree := new(bart.Table[bool])
 	tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)
@@ -112,17 +111,17 @@ func TestAllowList_Allow(t *testing.T) {
 	tree.Insert(netip.MustParsePrefix("::2/128"), false)
 	al := &AllowList{cidrTree: tree}

-	assert.True(t, al.Allow(netip.MustParseAddr("1.1.1.1")))
-	assert.False(t, al.Allow(netip.MustParseAddr("10.0.0.4")))
-	assert.True(t, al.Allow(netip.MustParseAddr("10.42.42.42")))
-	assert.False(t, al.Allow(netip.MustParseAddr("10.42.42.41")))
-	assert.True(t, al.Allow(netip.MustParseAddr("10.42.0.1")))
-	assert.True(t, al.Allow(netip.MustParseAddr("::1")))
-	assert.False(t, al.Allow(netip.MustParseAddr("::2")))
+	assert.Equal(t, true, al.Allow(netip.MustParseAddr("1.1.1.1")))
+	assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.0.0.4")))
+	assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.42.42")))
+	assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.42.42.41")))
+	assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.0.1")))
+	assert.Equal(t, true, al.Allow(netip.MustParseAddr("::1")))
+	assert.Equal(t, false, al.Allow(netip.MustParseAddr("::2")))
 }

 func TestLocalAllowList_AllowName(t *testing.T) {
-	assert.True(t, ((*LocalAllowList)(nil)).AllowName("docker0"))
+	assert.Equal(t, true, ((*LocalAllowList)(nil)).AllowName("docker0"))

 	rules := []AllowListNameRule{
 		{Name: regexp.MustCompile("^docker.*$"), Allow: false},
@@ -130,9 +129,9 @@ func TestLocalAllowList_AllowName(t *testing.T) {
 	}
 	al := &LocalAllowList{nameRules: rules}

-	assert.False(t, al.AllowName("docker0"))
-	assert.False(t, al.AllowName("tun0"))
-	assert.True(t, al.AllowName("eth0"))
+	assert.Equal(t, false, al.AllowName("docker0"))
+	assert.Equal(t, false, al.AllowName("tun0"))
+	assert.Equal(t, true, al.AllowName("eth0"))

 	rules = []AllowListNameRule{
 		{Name: regexp.MustCompile("^eth.*$"), Allow: true},
@@ -140,7 +139,7 @@ func TestLocalAllowList_AllowName(t *testing.T) {
 	}
 	al = &LocalAllowList{nameRules: rules}

-	assert.False(t, al.AllowName("docker0"))
-	assert.True(t, al.AllowName("eth0"))
-	assert.True(t, al.AllowName("ens5"))
+	assert.Equal(t, false, al.AllowName("docker0"))
+	assert.Equal(t, true, al.AllowName("eth0"))
+	assert.Equal(t, true, al.AllowName("ens5"))
 }
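The test hunks above move between two equivalent assertion styles (`assert.True`/`assert.False` versus `assert.Equal(t, true/false, ...)`) and between `require.EqualError` and `assert.EqualError`. A short, self-contained illustration of the difference, where `parsePort` is a hypothetical function written only to exercise the assertions:

```go
package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// parsePort is a hypothetical function used only to exercise the assertions.
func parsePort(s string) (int, error) {
	var p int
	if _, err := fmt.Sscanf(s, "%d", &p); err != nil {
		return 0, fmt.Errorf("invalid port: %q", s)
	}
	return p, nil
}

func TestParsePort(t *testing.T) {
	// require.* stops the test immediately on failure, which is the usual
	// choice when later checks would be meaningless (e.g. after an error).
	p, err := parsePort("4242")
	require.NoError(t, err)

	// assert.* records the failure but keeps running, so several related
	// checks can all be reported in one run.
	assert.Equal(t, 4242, p) // explicit expected/actual form
	assert.True(t, p > 0)    // boolean helper form, same outcome

	_, err = parsePort("nope")
	assert.EqualError(t, err, `invalid port: "nope"`)
}
```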
@@ -21,11 +21,7 @@ type calculatedRemote struct {
	port uint32
}

-func newCalculatedRemote(cidr, maskCidr netip.Prefix, port int) (*calculatedRemote, error) {
-	if maskCidr.Addr().BitLen() != cidr.Addr().BitLen() {
-		return nil, fmt.Errorf("invalid mask: %s for cidr: %s", maskCidr, cidr)
-	}
-
+func newCalculatedRemote(maskCidr netip.Prefix, port int) (*calculatedRemote, error) {
	masked := maskCidr.Masked()
	if port < 0 || port > math.MaxUint16 {
		return nil, fmt.Errorf("invalid port: %d", port)
@@ -42,38 +38,32 @@ func (c *calculatedRemote) String() string {
	return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
}

-func (c *calculatedRemote) ApplyV4(addr netip.Addr) *V4AddrPort {
-	// Combine the masked bytes of the "mask" IP with the unmasked bytes of the overlay IP
+func (c *calculatedRemote) Apply(ip netip.Addr) *Ip4AndPort {
+	// Combine the masked bytes of the "mask" IP with the unmasked bytes
+	// of the overlay IP
+	if c.ipNet.Addr().Is4() {
+		return c.apply4(ip)
+	}
+	return c.apply6(ip)
+}
+
+func (c *calculatedRemote) apply4(ip netip.Addr) *Ip4AndPort {
+	//TODO: IPV6-WORK this can be less crappy
	maskb := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
	mask := binary.BigEndian.Uint32(maskb[:])

	b := c.mask.Addr().As4()
-	maskAddr := binary.BigEndian.Uint32(b[:])
+	maskIp := binary.BigEndian.Uint32(b[:])

-	b = addr.As4()
-	intAddr := binary.BigEndian.Uint32(b[:])
+	b = ip.As4()
+	intIp := binary.BigEndian.Uint32(b[:])

-	return &V4AddrPort{(maskAddr & mask) | (intAddr & ^mask), c.port}
+	return &Ip4AndPort{(maskIp & mask) | (intIp & ^mask), c.port}
}

-func (c *calculatedRemote) ApplyV6(addr netip.Addr) *V6AddrPort {
-	mask := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
-	maskAddr := c.mask.Addr().As16()
-	calcAddr := addr.As16()
-
-	ap := V6AddrPort{Port: c.port}
-
-	maskb := binary.BigEndian.Uint64(mask[:8])
-	maskAddrb := binary.BigEndian.Uint64(maskAddr[:8])
-	calcAddrb := binary.BigEndian.Uint64(calcAddr[:8])
-	ap.Hi = (maskAddrb & maskb) | (calcAddrb & ^maskb)
-
-	maskb = binary.BigEndian.Uint64(mask[8:])
-	maskAddrb = binary.BigEndian.Uint64(maskAddr[8:])
-	calcAddrb = binary.BigEndian.Uint64(calcAddr[8:])
-	ap.Lo = (maskAddrb & maskb) | (calcAddrb & ^maskb)
-
-	return &ap
+func (c *calculatedRemote) apply6(ip netip.Addr) *Ip4AndPort {
+	//TODO: IPV6-WORK
+	panic("Can not calculate ipv6 remote addresses")
}

func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calculatedRemote], error) {
@@ -84,17 +74,23 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calcu

	calculatedRemotes := new(bart.Table[[]*calculatedRemote])

-	rawMap, ok := value.(map[string]any)
+	rawMap, ok := value.(map[any]any)
	if !ok {
		return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
	}
-	for rawCIDR, rawValue := range rawMap {
+	for rawKey, rawValue := range rawMap {
+		rawCIDR, ok := rawKey.(string)
+		if !ok {
+			return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
+		}
+
		cidr, err := netip.ParsePrefix(rawCIDR)
		if err != nil {
			return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
		}

-		entry, err := newCalculatedRemotesListFromConfig(cidr, rawValue)
+		//TODO: IPV6-WORK this does not verify that rawValue contains the same bits as cidr here
+		entry, err := newCalculatedRemotesListFromConfig(rawValue)
		if err != nil {
			return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
		}
@@ -105,7 +101,7 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calcu
	return calculatedRemotes, nil
}

-func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculatedRemote, error) {
+func newCalculatedRemotesListFromConfig(raw any) ([]*calculatedRemote, error) {
	rawList, ok := raw.([]any)
	if !ok {
		return nil, fmt.Errorf("calculated_remotes entry has invalid type: %T", raw)
@@ -113,7 +109,7 @@ func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculat

	var l []*calculatedRemote
	for _, e := range rawList {
-		c, err := newCalculatedRemotesEntryFromConfig(cidr, e)
+		c, err := newCalculatedRemotesEntryFromConfig(e)
		if err != nil {
			return nil, fmt.Errorf("calculated_remotes entry: %w", err)
		}
@@ -123,8 +119,8 @@ func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculat
	return l, nil
}

-func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculatedRemote, error) {
-	rawMap, ok := raw.(map[string]any)
+func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
+	rawMap, ok := raw.(map[any]any)
	if !ok {
		return nil, fmt.Errorf("invalid type: %T", raw)
	}
@@ -159,5 +155,5 @@ func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculate
		return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
	}

-	return newCalculatedRemote(cidr, maskCidr, port)
+	return newCalculatedRemote(maskCidr, port)
}
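The `Apply`/`apply4` variants above both implement the same idea: keep the network bits from the configured mask prefix and the host bits from the node's overlay address. A self-contained sketch of that bit arithmetic, for illustration only (the helper name `combineV4` is not part of the diff; only standard library calls are used):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"net/netip"
)

// combineV4 restates the apply4 bit math: network bits come from maskPrefix,
// host bits come from the overlay address.
func combineV4(maskPrefix netip.Prefix, overlay netip.Addr) netip.Addr {
	maskb := net.CIDRMask(maskPrefix.Bits(), 32)
	mask := binary.BigEndian.Uint32(maskb)

	m := maskPrefix.Addr().As4()
	maskBits := binary.BigEndian.Uint32(m[:])

	o := overlay.As4()
	hostBits := binary.BigEndian.Uint32(o[:])

	var out [4]byte
	binary.BigEndian.PutUint32(out[:], (maskBits&mask)|(hostBits&^mask))
	return netip.AddrFrom4(out)
}

func main() {
	// Mirrors the test case below: 192.168.1.0/24 applied to 10.0.10.182
	fmt.Println(combineV4(netip.MustParsePrefix("192.168.1.0/24"), netip.MustParseAddr("10.0.10.182")))
}
```

Running the sketch prints `192.168.1.182`, which matches the assertion in the test hunk that follows.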
@@ -9,73 +9,17 @@ import (
)

func TestCalculatedRemoteApply(t *testing.T) {
-	// Test v4 addresses
-	ipNet := netip.MustParsePrefix("192.168.1.0/24")
-	c, err := newCalculatedRemote(ipNet, ipNet, 4242)
+	ipNet, err := netip.ParsePrefix("192.168.1.0/24")
+	require.NoError(t, err)
+
+	c, err := newCalculatedRemote(ipNet, 4242)
	require.NoError(t, err)

	input, err := netip.ParseAddr("10.0.10.182")
-	require.NoError(t, err)
+	assert.NoError(t, err)

	expected, err := netip.ParseAddr("192.168.1.182")
-	require.NoError(t, err)
+	assert.NoError(t, err)

-	assert.Equal(t, netAddrToProtoV4AddrPort(expected, 4242), c.ApplyV4(input))
-
-	// Test v6 addresses
-	ipNet = netip.MustParsePrefix("ffff:ffff:ffff:ffff::0/64")
-	c, err = newCalculatedRemote(ipNet, ipNet, 4242)
-	require.NoError(t, err)
-
-	input, err = netip.ParseAddr("beef:beef:beef:beef:beef:beef:beef:beef")
-	require.NoError(t, err)
-
-	expected, err = netip.ParseAddr("ffff:ffff:ffff:ffff:beef:beef:beef:beef")
-	require.NoError(t, err)
-
-	assert.Equal(t, netAddrToProtoV6AddrPort(expected, 4242), c.ApplyV6(input))
-
-	// Test v6 addresses part 2
-	ipNet = netip.MustParsePrefix("ffff:ffff:ffff:ffff:ffff::0/80")
-	c, err = newCalculatedRemote(ipNet, ipNet, 4242)
-	require.NoError(t, err)
-
-	input, err = netip.ParseAddr("beef:beef:beef:beef:beef:beef:beef:beef")
-	require.NoError(t, err)
-
-	expected, err = netip.ParseAddr("ffff:ffff:ffff:ffff:ffff:beef:beef:beef")
-	require.NoError(t, err)
-
-	assert.Equal(t, netAddrToProtoV6AddrPort(expected, 4242), c.ApplyV6(input))
-
-	// Test v6 addresses part 2
-	ipNet = netip.MustParsePrefix("ffff:ffff:ffff::0/48")
-	c, err = newCalculatedRemote(ipNet, ipNet, 4242)
-	require.NoError(t, err)
-
-	input, err = netip.ParseAddr("beef:beef:beef:beef:beef:beef:beef:beef")
-	require.NoError(t, err)
-
-	expected, err = netip.ParseAddr("ffff:ffff:ffff:beef:beef:beef:beef:beef")
-	require.NoError(t, err)
-
-	assert.Equal(t, netAddrToProtoV6AddrPort(expected, 4242), c.ApplyV6(input))
-}
-
-func Test_newCalculatedRemote(t *testing.T) {
-	c, err := newCalculatedRemote(netip.MustParsePrefix("1::1/128"), netip.MustParsePrefix("1.0.0.0/32"), 4242)
-	require.EqualError(t, err, "invalid mask: 1.0.0.0/32 for cidr: 1::1/128")
-	require.Nil(t, c)
-
-	c, err = newCalculatedRemote(netip.MustParsePrefix("1.0.0.0/32"), netip.MustParsePrefix("1::1/128"), 4242)
-	require.EqualError(t, err, "invalid mask: 1::1/128 for cidr: 1.0.0.0/32")
-	require.Nil(t, c)
-
-	c, err = newCalculatedRemote(netip.MustParsePrefix("1.0.0.0/32"), netip.MustParsePrefix("1.0.0.0/32"), 4242)
-	require.NoError(t, err)
-	require.NotNil(t, c)
-
-	c, err = newCalculatedRemote(netip.MustParsePrefix("1::1/128"), netip.MustParsePrefix("1::1/128"), 4242)
-	require.NoError(t, err)
-	require.NotNil(t, c)
+	assert.Equal(t, NewIp4AndPortFromNetIP(expected, 4242), c.Apply(input))
}
@@ -1,7 +1,7 @@
GO111MODULE = on
export GO111MODULE

-cert_v1.pb.go: cert_v1.proto .FORCE
+cert.pb.go: cert.proto .FORCE
	go build google.golang.org/protobuf/cmd/protoc-gen-go
	PATH="$(CURDIR):$(PATH)" protoc --go_out=. --go_opt=paths=source_relative $<
	rm protoc-gen-go
@@ -2,25 +2,14 @@

This is a library for interacting with `nebula` style certificates and authorities.

-There are now 2 versions of `nebula` certificates:
+A `protobuf` definition of the certificate format is also included

-## v1
+### Compiling the protobuf definition

-This version is deprecated.
+Make sure you have `protoc` installed.

-A `protobuf` definition of the certificate format is included at `cert_v1.proto`
-
-To compile the definition you will need `protoc` installed.
-
To compile for `go` with the same version of protobuf specified in go.mod:

```bash
-make proto
+make
```

-
-## v2
-
-This is the latest version which uses asn.1 DER encoding. It can support ipv4 and ipv6 and tolerate
-future certificate changes better than v1.
-
-`cert_v2.asn1` defines the wire format and can be used to compile marshalers.
52	cert/asn1.go
@@ -1,52 +0,0 @@
-package cert
-
-import (
-	"golang.org/x/crypto/cryptobyte"
-	"golang.org/x/crypto/cryptobyte/asn1"
-)
-
-// readOptionalASN1Boolean reads an asn.1 boolean with a specific tag instead of a asn.1 tag wrapping a boolean with a value
-// https://github.com/golang/go/issues/64811#issuecomment-1944446920
-func readOptionalASN1Boolean(b *cryptobyte.String, out *bool, tag asn1.Tag, defaultValue bool) bool {
-	var present bool
-	var child cryptobyte.String
-	if !b.ReadOptionalASN1(&child, &present, tag) {
-		return false
-	}
-
-	if !present {
-		*out = defaultValue
-		return true
-	}
-
-	// Ensure we have 1 byte
-	if len(child) == 1 {
-		*out = child[0] > 0
-		return true
-	}
-
-	return false
-}
-
-// readOptionalASN1Byte reads an asn.1 uint8 with a specific tag instead of a asn.1 tag wrapping a uint8 with a value
-// Similar issue as with readOptionalASN1Boolean
-func readOptionalASN1Byte(b *cryptobyte.String, out *byte, tag asn1.Tag, defaultValue byte) bool {
-	var present bool
-	var child cryptobyte.String
-	if !b.ReadOptionalASN1(&child, &present, tag) {
-		return false
-	}
-
-	if !present {
-		*out = defaultValue
-		return true
-	}
-
-	// Ensure we have 1 byte
-	if len(child) == 1 {
-		*out = child[0]
-		return true
-	}
-
-	return false
-}
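The two helpers removed above read an optional value that is encoded directly under a context-specific tag (rather than a tag wrapping a full ASN.1 BOOLEAN), falling back to a default when the element is absent; see the linked Go issue. A usage sketch, illustrative only: the function name, hand-built bytes, and tag number 0 are assumptions for the demo, not part of the diff.

```go
package cert

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

// exampleReadOptionalBool is illustrative only and not part of the diff.
func exampleReadOptionalBool() {
	// Context-specific tag 0; element bytes: 0x80 (tag), 0x01 (length), 0x01 (value).
	tag := asn1.Tag(0).ContextSpecific()
	in := cryptobyte.String([]byte{0x80, 0x01, 0x01})

	var out bool
	if readOptionalASN1Boolean(&in, &out, tag, false) {
		fmt.Println("value read:", out) // value read: true
	}

	// When the element is absent, the supplied default is returned instead.
	empty := cryptobyte.String(nil)
	if readOptionalASN1Boolean(&empty, &out, tag, true) {
		fmt.Println("default used:", out) // default used: true
	}
}
```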
148	cert/ca.go	Normal file
@@ -0,0 +1,148 @@
+package cert
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+type NebulaCAPool struct {
+	CAs map[string]*NebulaCertificate
+	certBlocklist map[string]struct{}
+}
+
+// NewCAPool creates a CAPool
+func NewCAPool() *NebulaCAPool {
+	ca := NebulaCAPool{
+		CAs:           make(map[string]*NebulaCertificate),
+		certBlocklist: make(map[string]struct{}),
+	}
+
+	return &ca
+}
+
+// NewCAPoolFromBytes will create a new CA pool from the provided
+// input bytes, which must be a PEM-encoded set of nebula certificates.
+// If the pool contains unsupported certificates, they will generate warnings
+// in the []error return arg.
+// If the pool contains any expired certificates, an ErrExpired will be
+// returned along with the pool. The caller must handle any such errors.
+func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, []error, error) {
+	pool := NewCAPool()
+	var err error
+	var warnings []error
+	good := 0
+
+	for {
+		caPEMs, err = pool.AddCACertificate(caPEMs)
+		if errors.Is(err, ErrExpired) {
+			warnings = append(warnings, err)
+		} else if errors.Is(err, ErrInvalidPEMCertificateUnsupported) {
+			warnings = append(warnings, err)
+		} else if err != nil {
+			return nil, warnings, err
+		} else {
+			// Only consider a good certificate if there were no errors present
+			good++
+		}
+
+		if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
+			break
+		}
+	}
+
+	if good == 0 {
+		return nil, warnings, errors.New("no valid CA certificates present")
+	}
+
+	return pool, warnings, nil
+}
+
+// AddCACertificate verifies a Nebula CA certificate and adds it to the pool
+// Only the first pem encoded object will be consumed, any remaining bytes are returned.
+// Parsed certificates will be verified and must be a CA
+func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
+	c, pemBytes, err := UnmarshalNebulaCertificateFromPEM(pemBytes)
+	if err != nil {
+		return pemBytes, err
+	}
+
+	if !c.Details.IsCA {
+		return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotCA)
+	}
+
+	if !c.CheckSignature(c.Details.PublicKey) {
+		return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotSelfSigned)
+	}
+
+	sum, err := c.Sha256Sum()
+	if err != nil {
+		return pemBytes, fmt.Errorf("could not calculate shasum for provided CA; error: %s; %s", err, c.Details.Name)
+	}
+
+	ncp.CAs[sum] = c
+	if c.Expired(time.Now()) {
+		return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrExpired)
+	}
+
+	return pemBytes, nil
+}
+
+// BlocklistFingerprint adds a cert fingerprint to the blocklist
+func (ncp *NebulaCAPool) BlocklistFingerprint(f string) {
+	ncp.certBlocklist[f] = struct{}{}
+}
+
+// ResetCertBlocklist removes all previously blocklisted cert fingerprints
+func (ncp *NebulaCAPool) ResetCertBlocklist() {
+	ncp.certBlocklist = make(map[string]struct{})
+}
+
+// NOTE: This uses an internal cache for Sha256Sum() that will not be invalidated
+// automatically if you manually change any fields in the NebulaCertificate.
+func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
+	return ncp.isBlocklistedWithCache(c, false)
+}
+
+// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
+func (ncp *NebulaCAPool) isBlocklistedWithCache(c *NebulaCertificate, useCache bool) bool {
+	h, err := c.sha256SumWithCache(useCache)
+	if err != nil {
+		return true
+	}
+
+	if _, ok := ncp.certBlocklist[h]; ok {
+		return true
+	}
+
+	return false
+}
+
+// GetCAForCert attempts to return the signing certificate for the provided certificate.
+// No signature validation is performed
+func (ncp *NebulaCAPool) GetCAForCert(c *NebulaCertificate) (*NebulaCertificate, error) {
+	if c.Details.Issuer == "" {
+		return nil, fmt.Errorf("no issuer in certificate")
+	}
+
+	signer, ok := ncp.CAs[c.Details.Issuer]
+	if ok {
+		return signer, nil
+	}
+
+	return nil, fmt.Errorf("could not find ca for the certificate")
+}
+
+// GetFingerprints returns an array of trusted CA fingerprints
+func (ncp *NebulaCAPool) GetFingerprints() []string {
+	fp := make([]string, len(ncp.CAs))
+
+	i := 0
+	for k := range ncp.CAs {
+		fp[i] = k
+		i++
+	}
+
+	return fp
+}
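For context, the `NewCAPoolFromBytes`/`AddCACertificate` API added above is driven with the PEM bundle of trusted CA certificates. A minimal sketch of a caller, illustrative only (the wrapper function and its logging are not part of the diff):

```go
package cert

import "fmt"

// loadPoolExample is an illustrative caller of the API above; caPEM would
// normally be the contents of the CA file referenced by the node config.
func loadPoolExample(caPEM []byte) (*NebulaCAPool, error) {
	pool, warnings, err := NewCAPoolFromBytes(caPEM)
	if err != nil {
		return nil, err // no usable CA certificates at all
	}

	// Expired or unsupported CAs still land in the pool; they are surfaced
	// through the warnings slice so the caller can decide what to do.
	for _, w := range warnings {
		fmt.Printf("ca warning: %v\n", w)
	}

	fmt.Printf("trusted fingerprints: %v\n", pool.GetFingerprints())
	return pool, nil
}
```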
296	cert/ca_pool.go
@@ -1,296 +0,0 @@
-package cert
-
-import (
-	"errors"
-	"fmt"
-	"net/netip"
-	"slices"
-	"strings"
-	"time"
-)
-
-type CAPool struct {
-	CAs map[string]*CachedCertificate
-	certBlocklist map[string]struct{}
-}
-
-// NewCAPool creates an empty CAPool
-func NewCAPool() *CAPool {
-	ca := CAPool{
-		CAs:           make(map[string]*CachedCertificate),
-		certBlocklist: make(map[string]struct{}),
-	}
-
-	return &ca
-}
-
-// NewCAPoolFromPEM will create a new CA pool from the provided
-// input bytes, which must be a PEM-encoded set of nebula certificates.
-// If the pool contains any expired certificates, an ErrExpired will be
-// returned along with the pool. The caller must handle any such errors.
-func NewCAPoolFromPEM(caPEMs []byte) (*CAPool, error) {
-	pool := NewCAPool()
-	var err error
-	var expired bool
-	for {
-		caPEMs, err = pool.AddCAFromPEM(caPEMs)
-		if errors.Is(err, ErrExpired) {
-			expired = true
-			err = nil
-		}
-		if err != nil {
-			return nil, err
-		}
-		if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
-			break
-		}
-	}
-
-	if expired {
-		return pool, ErrExpired
-	}
-
-	return pool, nil
-}
-
-// AddCAFromPEM verifies a Nebula CA certificate and adds it to the pool.
-// Only the first pem encoded object will be consumed, any remaining bytes are returned.
-// Parsed certificates will be verified and must be a CA
-func (ncp *CAPool) AddCAFromPEM(pemBytes []byte) ([]byte, error) {
-	c, pemBytes, err := UnmarshalCertificateFromPEM(pemBytes)
-	if err != nil {
-		return pemBytes, err
-	}
-
-	err = ncp.AddCA(c)
-	if err != nil {
-		return pemBytes, err
-	}
-
-	return pemBytes, nil
-}
-
-// AddCA verifies a Nebula CA certificate and adds it to the pool.
-func (ncp *CAPool) AddCA(c Certificate) error {
-	if !c.IsCA() {
-		return fmt.Errorf("%s: %w", c.Name(), ErrNotCA)
-	}
-
-	if !c.CheckSignature(c.PublicKey()) {
-		return fmt.Errorf("%s: %w", c.Name(), ErrNotSelfSigned)
-	}
-
-	sum, err := c.Fingerprint()
-	if err != nil {
-		return fmt.Errorf("could not calculate fingerprint for provided CA; error: %w; %s", err, c.Name())
-	}
-
-	cc := &CachedCertificate{
-		Certificate: c,
-		Fingerprint: sum,
-		InvertedGroups: make(map[string]struct{}),
-	}
-
-	for _, g := range c.Groups() {
-		cc.InvertedGroups[g] = struct{}{}
-	}
-
-	ncp.CAs[sum] = cc
-
-	if c.Expired(time.Now()) {
-		return fmt.Errorf("%s: %w", c.Name(), ErrExpired)
-	}
-
-	return nil
-}
-
-// BlocklistFingerprint adds a cert fingerprint to the blocklist
-func (ncp *CAPool) BlocklistFingerprint(f string) {
-	ncp.certBlocklist[f] = struct{}{}
-}
-
-// ResetCertBlocklist removes all previously blocklisted cert fingerprints
-func (ncp *CAPool) ResetCertBlocklist() {
-	ncp.certBlocklist = make(map[string]struct{})
-}
-
-// IsBlocklisted tests the provided fingerprint against the pools blocklist.
-// Returns true if the fingerprint is blocked.
-func (ncp *CAPool) IsBlocklisted(fingerprint string) bool {
-	if _, ok := ncp.certBlocklist[fingerprint]; ok {
-		return true
-	}
-
-	return false
-}
-
-// VerifyCertificate verifies the certificate is valid and is signed by a trusted CA in the pool.
-// If the certificate is valid then the returned CachedCertificate can be used in subsequent verification attempts
-// to increase performance.
-func (ncp *CAPool) VerifyCertificate(now time.Time, c Certificate) (*CachedCertificate, error) {
-	if c == nil {
-		return nil, fmt.Errorf("no certificate")
-	}
-	fp, err := c.Fingerprint()
-	if err != nil {
-		return nil, fmt.Errorf("could not calculate fingerprint to verify: %w", err)
-	}
-
-	signer, err := ncp.verify(c, now, fp, "")
-	if err != nil {
-		return nil, err
-	}
-
-	cc := CachedCertificate{
-		Certificate: c,
-		InvertedGroups: make(map[string]struct{}),
-		Fingerprint: fp,
-		signerFingerprint: signer.Fingerprint,
-	}
-
-	for _, g := range c.Groups() {
-		cc.InvertedGroups[g] = struct{}{}
-	}
-
-	return &cc, nil
-}
-
-// VerifyCachedCertificate is the same as VerifyCertificate other than it operates on a pre-verified structure and
-// is a cheaper operation to perform as a result.
-func (ncp *CAPool) VerifyCachedCertificate(now time.Time, c *CachedCertificate) error {
-	_, err := ncp.verify(c.Certificate, now, c.Fingerprint, c.signerFingerprint)
-	return err
-}
-
-func (ncp *CAPool) verify(c Certificate, now time.Time, certFp string, signerFp string) (*CachedCertificate, error) {
-	if ncp.IsBlocklisted(certFp) {
-		return nil, ErrBlockListed
-	}
-
-	signer, err := ncp.GetCAForCert(c)
-	if err != nil {
-		return nil, err
-	}
-
-	if signer.Certificate.Expired(now) {
-		return nil, ErrRootExpired
-	}
-
-	if c.Expired(now) {
-		return nil, ErrExpired
-	}
-
-	// If we are checking a cached certificate then we can bail early here
-	// Either the root is no longer trusted or everything is fine
-	if len(signerFp) > 0 {
-		if signerFp != signer.Fingerprint {
-			return nil, ErrFingerprintMismatch
-		}
-		return signer, nil
-	}
-	if !c.CheckSignature(signer.Certificate.PublicKey()) {
-		return nil, ErrSignatureMismatch
-	}
-
-	err = CheckCAConstraints(signer.Certificate, c)
-	if err != nil {
-		return nil, err
-	}
-
-	return signer, nil
-}
-
-// GetCAForCert attempts to return the signing certificate for the provided certificate.
-// No signature validation is performed
-func (ncp *CAPool) GetCAForCert(c Certificate) (*CachedCertificate, error) {
-	issuer := c.Issuer()
-	if issuer == "" {
-		return nil, fmt.Errorf("no issuer in certificate")
-	}
-
-	signer, ok := ncp.CAs[issuer]
-	if ok {
-		return signer, nil
-	}
-
-	return nil, ErrCaNotFound
-}
-
-// GetFingerprints returns an array of trusted CA fingerprints
-func (ncp *CAPool) GetFingerprints() []string {
-	fp := make([]string, len(ncp.CAs))
-
-	i := 0
-	for k := range ncp.CAs {
-		fp[i] = k
-		i++
-	}
-
-	return fp
-}
-
-// CheckCAConstraints returns an error if the sub certificate violates constraints present in the signer certificate.
-func CheckCAConstraints(signer Certificate, sub Certificate) error {
-	return checkCAConstraints(signer, sub.NotBefore(), sub.NotAfter(), sub.Groups(), sub.Networks(), sub.UnsafeNetworks())
-}
-
-// checkCAConstraints is a very generic function allowing both Certificates and TBSCertificates to be tested.
-func checkCAConstraints(signer Certificate, notBefore, notAfter time.Time, groups []string, networks, unsafeNetworks []netip.Prefix) error {
-	// Make sure this cert isn't valid after the root
-	if notAfter.After(signer.NotAfter()) {
-		return fmt.Errorf("certificate expires after signing certificate")
-	}
-
-	// Make sure this cert wasn't valid before the root
-	if notBefore.Before(signer.NotBefore()) {
-		return fmt.Errorf("certificate is valid before the signing certificate")
-	}
-
-	// If the signer has a limited set of groups make sure the cert only contains a subset
-	signerGroups := signer.Groups()
-	if len(signerGroups) > 0 {
-		for _, g := range groups {
-			if !slices.Contains(signerGroups, g) {
-				return fmt.Errorf("certificate contained a group not present on the signing ca: %s", g)
-			}
-		}
-	}
-
-	// If the signer has a limited set of ip ranges to issue from make sure the cert only contains a subset
-	signingNetworks := signer.Networks()
-	if len(signingNetworks) > 0 {
-		for _, certNetwork := range networks {
-			found := false
-			for _, signingNetwork := range signingNetworks {
-				if signingNetwork.Contains(certNetwork.Addr()) && signingNetwork.Bits() <= certNetwork.Bits() {
-					found = true
-					break
-				}
-			}
-
-			if !found {
-				return fmt.Errorf("certificate contained a network assignment outside the limitations of the signing ca: %s", certNetwork.String())
-			}
-		}
-	}
-
-	// If the signer has a limited set of subnet ranges to issue from make sure the cert only contains a subset
-	signingUnsafeNetworks := signer.UnsafeNetworks()
-	if len(signingUnsafeNetworks) > 0 {
-		for _, certUnsafeNetwork := range unsafeNetworks {
-			found := false
-			for _, caNetwork := range signingUnsafeNetworks {
-				if caNetwork.Contains(certUnsafeNetwork.Addr()) && caNetwork.Bits() <= certUnsafeNetwork.Bits() {
-					found = true
-					break
-				}
-			}
-
-			if !found {
-				return fmt.Errorf("certificate contained an unsafe network assignment outside the limitations of the signing ca: %s", certUnsafeNetwork.String())
-			}
-		}
-	}
-
-	return nil
-}
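The removed `CAPool` above splits verification into a full check (`VerifyCertificate`) and a cheaper re-check against the returned `CachedCertificate` (`VerifyCachedCertificate`). A sketch of that pattern, illustrative only (the wrapper function is not part of the diff):

```go
package cert

import (
	"fmt"
	"time"
)

// verifyTwice is an illustrative caller of the API above. The first call does
// the full signature and constraint checks; the cached result makes later
// re-checks cheap.
func verifyTwice(pool *CAPool, c Certificate) error {
	cached, err := pool.VerifyCertificate(time.Now(), c)
	if err != nil {
		return fmt.Errorf("initial verification failed: %w", err)
	}

	// Later, e.g. when re-validating an existing tunnel, reuse the cache.
	if err := pool.VerifyCachedCertificate(time.Now(), cached); err != nil {
		return fmt.Errorf("cached verification failed: %w", err)
	}
	return nil
}
```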
@ -1,560 +0,0 @@
|
|||||||
package cert
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/netip"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNewCAPoolFromBytes(t *testing.T) {
|
|
||||||
noNewLines := `
|
|
||||||
# Current provisional, Remove once everything moves over to the real root.
|
|
||||||
-----BEGIN NEBULA CERTIFICATE-----
|
|
||||||
Cj4KDm5lYnVsYSByb290IGNhKM0cMM24zPCvBzogV24YEw5YiqeI/oYo8XXFsoo+
|
|
||||||
PBmiOafNJhLacf9rsspAARJAz9OAnh8TKAUKix1kKVMyQU4iM3LsFfZRf6ODWXIf
|
|
||||||
2qWMpB6fpd3PSoVYziPoOt2bIHIFLlgRLPJz3I3xBEdBCQ==
|
|
||||||
-----END NEBULA CERTIFICATE-----
|
|
||||||
# root-ca01
|
|
||||||
-----BEGIN NEBULA CERTIFICATE-----
|
|
||||||
CkEKEW5lYnVsYSByb290IGNhIDAxKM0cMM24zPCvBzogPzbWTxt8ZgXPQEwup7Br
|
|
||||||
BrtIt1O0q5AuTRT3+t2x1VJAARJAZ+2ib23qBXjdy49oU1YysrwuKkWWKrtJ7Jye
|
|
||||||
rFBQpDXikOukhQD/mfkloFwJ+Yjsfru7IpTN4ZfjXL+kN/2sCA==
|
|
||||||
-----END NEBULA CERTIFICATE-----
|
|
||||||
`
|
|
||||||
|
|
||||||
withNewLines := `
|
|
||||||
# Current provisional, Remove once everything moves over to the real root.
|
|
||||||
|
|
||||||
-----BEGIN NEBULA CERTIFICATE-----
|
|
||||||
Cj4KDm5lYnVsYSByb290IGNhKM0cMM24zPCvBzogV24YEw5YiqeI/oYo8XXFsoo+
|
|
||||||
PBmiOafNJhLacf9rsspAARJAz9OAnh8TKAUKix1kKVMyQU4iM3LsFfZRf6ODWXIf
|
|
||||||
2qWMpB6fpd3PSoVYziPoOt2bIHIFLlgRLPJz3I3xBEdBCQ==
|
|
||||||
-----END NEBULA CERTIFICATE-----
|
|
||||||
|
|
||||||
# root-ca01
|
|
||||||
|
|
||||||
|
|
||||||
-----BEGIN NEBULA CERTIFICATE-----
|
|
||||||
CkEKEW5lYnVsYSByb290IGNhIDAxKM0cMM24zPCvBzogPzbWTxt8ZgXPQEwup7Br
|
|
||||||
BrtIt1O0q5AuTRT3+t2x1VJAARJAZ+2ib23qBXjdy49oU1YysrwuKkWWKrtJ7Jye
|
|
||||||
rFBQpDXikOukhQD/mfkloFwJ+Yjsfru7IpTN4ZfjXL+kN/2sCA==
|
|
||||||
-----END NEBULA CERTIFICATE-----
|
|
||||||
|
|
||||||
`
|
|
||||||
|
|
||||||
expired := `
|
|
||||||
# expired certificate
|
|
||||||
-----BEGIN NEBULA CERTIFICATE-----
|
|
||||||
CjMKB2V4cGlyZWQozRwwzRw6ICJSG94CqX8wn5I65Pwn25V6HftVfWeIySVtp2DA
|
|
||||||
7TY/QAESQMaAk5iJT5EnQwK524ZaaHGEJLUqqbh5yyOHhboIGiVTWkFeH3HccTW8
|
|
||||||
Tq5a8AyWDQdfXbtEZ1FwabeHfH5Asw0=
|
|
||||||
-----END NEBULA CERTIFICATE-----
|
|
||||||
`
|
|
||||||
|
|
||||||
p256 := `
|
|
||||||
# p256 certificate
|
|
||||||
-----BEGIN NEBULA CERTIFICATE-----
|
|
||||||
CmQKEG5lYnVsYSBQMjU2IHRlc3QozRwwzbjM8K8HOkEEdrmmg40zQp44AkMq6DZp
|
|
||||||
k+coOv04r+zh33ISyhbsafnYduN17p2eD7CmHvHuerguXD9f32gcxo/KsFCKEjMe
|
|
||||||
+0ABoAYBEkcwRQIgVoTg38L7uWku9xQgsr06kxZ/viQLOO/w1Qj1vFUEnhcCIQCq
|
|
||||||
75SjTiV92kv/1GcbT3wWpAZQQDBiUHVMVmh1822szA==
|
|
||||||
-----END NEBULA CERTIFICATE-----
|
|
||||||
`
|
|
||||||
|
|
||||||
rootCA := certificateV1{
|
|
||||||
details: detailsV1{
|
|
||||||
name: "nebula root ca",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
rootCA01 := certificateV1{
|
|
||||||
details: detailsV1{
|
|
||||||
name: "nebula root ca 01",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
rootCAP256 := certificateV1{
|
|
||||||
details: detailsV1{
|
|
||||||
name: "nebula P256 test",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err := NewCAPoolFromPEM([]byte(noNewLines))
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, p.CAs["ce4e6c7a596996eb0d82a8875f0f0137a4b53ce22d2421c9fd7150e7a26f6300"].Certificate.Name(), rootCA.details.name)
|
|
||||||
assert.Equal(t, p.CAs["04c585fcd9a49b276df956a22b7ebea3bf23f1fca5a17c0b56ce2e626631969e"].Certificate.Name(), rootCA01.details.name)
|
|
||||||
|
|
||||||
pp, err := NewCAPoolFromPEM([]byte(withNewLines))
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, pp.CAs["ce4e6c7a596996eb0d82a8875f0f0137a4b53ce22d2421c9fd7150e7a26f6300"].Certificate.Name(), rootCA.details.name)
|
|
||||||
assert.Equal(t, pp.CAs["04c585fcd9a49b276df956a22b7ebea3bf23f1fca5a17c0b56ce2e626631969e"].Certificate.Name(), rootCA01.details.name)
|
|
||||||
|
|
||||||
// expired cert, no valid certs
|
|
||||||
ppp, err := NewCAPoolFromPEM([]byte(expired))
|
|
||||||
assert.Equal(t, ErrExpired, err)
|
|
||||||
assert.Equal(t, "expired", ppp.CAs["c39b35a0e8f246203fe4f32b9aa8bfd155f1ae6a6be9d78370641e43397f48f5"].Certificate.Name())
|
|
||||||
|
|
||||||
// expired cert, with valid certs
|
|
||||||
pppp, err := NewCAPoolFromPEM(append([]byte(expired), noNewLines...))
|
|
||||||
assert.Equal(t, ErrExpired, err)
|
|
||||||
assert.Equal(t, pppp.CAs["ce4e6c7a596996eb0d82a8875f0f0137a4b53ce22d2421c9fd7150e7a26f6300"].Certificate.Name(), rootCA.details.name)
|
|
||||||
assert.Equal(t, pppp.CAs["04c585fcd9a49b276df956a22b7ebea3bf23f1fca5a17c0b56ce2e626631969e"].Certificate.Name(), rootCA01.details.name)
|
|
||||||
assert.Equal(t, "expired", pppp.CAs["c39b35a0e8f246203fe4f32b9aa8bfd155f1ae6a6be9d78370641e43397f48f5"].Certificate.Name())
|
|
||||||
assert.Len(t, pppp.CAs, 3)
|
|
||||||
|
|
||||||
ppppp, err := NewCAPoolFromPEM([]byte(p256))
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, ppppp.CAs["552bf7d99bec1fc775a0e4c324bf6d8f789b3078f1919c7960d2e5e0c351ee97"].Certificate.Name(), rootCAP256.details.name)
|
|
||||||
assert.Len(t, ppppp.CAs, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV1_Verify(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
|
||||||
c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test cert", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
require.NoError(t, caPool.AddCA(ca))
|
|
||||||
|
|
||||||
f, err := c.Fingerprint()
|
|
||||||
require.NoError(t, err)
|
|
||||||
caPool.BlocklistFingerprint(f)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.EqualError(t, err, "certificate is in the block list")
|
|
||||||
|
|
||||||
caPool.ResetCertBlocklist()
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
|
|
||||||
require.EqualError(t, err, "root certificate is expired")
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test cert2", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Test group assertion
|
|
||||||
ca, _, caKey, _ = NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool = NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
|
|
||||||
})
|
|
||||||
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test2", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV1_VerifyP256(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version1, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
|
||||||
c, _, _, _ := NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
require.NoError(t, caPool.AddCA(ca))
|
|
||||||
|
|
||||||
f, err := c.Fingerprint()
|
|
||||||
require.NoError(t, err)
|
|
||||||
caPool.BlocklistFingerprint(f)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.EqualError(t, err, "certificate is in the block list")
|
|
||||||
|
|
||||||
caPool.ResetCertBlocklist()
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
|
|
||||||
require.EqualError(t, err, "root certificate is expired")
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
|
|
||||||
NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Test group assertion
|
|
||||||
ca, _, caKey, _ = NewTestCaCert(Version1, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool = NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
|
|
||||||
NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
|
|
||||||
})
|
|
||||||
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV1_Verify_IPs(t *testing.T) {
|
|
||||||
caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
|
|
||||||
caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})
|
|
||||||
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
// ip is outside the network
|
|
||||||
cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
|
|
||||||
cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
|
|
||||||
assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is outside the network reversed order of above
|
|
||||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
|
|
||||||
assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is within the network but mask is outside
|
|
||||||
cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is within the network but mask is outside reversed order of above
|
|
||||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
|
|
||||||
assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip and mask are within the network
|
|
||||||
cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
|
|
||||||
c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Exact matches
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Exact matches reversed
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp2, caIp1}, nil, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Exact matches reversed with just 1
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1}, nil, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV1_Verify_Subnets(t *testing.T) {
|
|
||||||
caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
|
|
||||||
caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})
|
|
||||||
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
// ip is outside the network
|
|
||||||
cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
|
|
||||||
cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
|
|
||||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is outside the network reversed order of above
|
|
||||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
|
|
||||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is within the network but mask is outside
|
|
||||||
cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is within the network but mask is outside reversed order of above
|
|
||||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
|
|
||||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
|
|
||||||
NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip and mask are within the network
|
|
||||||
cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
|
|
||||||
c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Exact matches
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Exact matches reversed
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp2, caIp1}, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Exact matches reversed with just 1
|
|
||||||
c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1}, []string{"test"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_Verify(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
|
||||||
c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test cert", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
require.NoError(t, caPool.AddCA(ca))
|
|
||||||
|
|
||||||
f, err := c.Fingerprint()
|
|
||||||
require.NoError(t, err)
|
|
||||||
caPool.BlocklistFingerprint(f)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.EqualError(t, err, "certificate is in the block list")
|
|
||||||
|
|
||||||
caPool.ResetCertBlocklist()
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
|
|
||||||
require.EqualError(t, err, "root certificate is expired")
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
|
|
||||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test cert2", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Test group assertion
|
|
||||||
ca, _, caKey, _ = NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool = NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
|
|
||||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
|
|
||||||
})
|
|
||||||
|
|
||||||
c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test2", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_VerifyP256(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
|
||||||
c, _, _, _ := NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
require.NoError(t, caPool.AddCA(ca))
|
|
||||||
|
|
||||||
f, err := c.Fingerprint()
|
|
||||||
require.NoError(t, err)
|
|
||||||
caPool.BlocklistFingerprint(f)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.EqualError(t, err, "certificate is in the block list")
|
|
||||||
|
|
||||||
caPool.ResetCertBlocklist()
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
|
|
||||||
require.EqualError(t, err, "root certificate is expired")
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
|
|
||||||
NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Test group assertion
|
|
||||||
ca, _, caKey, _ = NewTestCaCert(Version2, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool = NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
|
|
||||||
NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
|
|
||||||
})
|
|
||||||
|
|
||||||
c, _, _, _ = NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
|
|
||||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_Verify_IPs(t *testing.T) {
|
|
||||||
caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
|
|
||||||
caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})
|
|
||||||
|
|
||||||
caPem, err := ca.MarshalPEM()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
caPool := NewCAPool()
|
|
||||||
b, err := caPool.AddCAFromPEM(caPem)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
|
|
||||||
// ip is outside the network
|
|
||||||
cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
|
|
||||||
cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
|
|
||||||
assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
|
||||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ip is outside the network reversed order of above
|
|
||||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
|
||||||
cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
|
|
||||||
assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
|
||||||
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is within the network but mask is outside
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is within the network but mask is outside reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip and mask are within the network
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
	c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp2, caIp1}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed with just 1
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}

func TestCertificateV2_Verify_Subnets(t *testing.T) {
	caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
	caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
	ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})

	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool := NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	// ip is outside the network
	cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
	cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip is outside the network reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip is within the network but mask is outside
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip is within the network but mask is outside reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip and mask are within the network
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
	c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp2, caIp1}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed with just 1
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
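The checks these tests exercise boil down to prefix containment: a certificate's networks and unsafe networks must sit inside one of the signing CA's networks, both by address and by mask length. Below is a minimal standalone sketch of that containment rule, using only the standard library netip package; it is not the cert package's actual implementation, just the rule the panics above are enforcing.

	package main

	import (
		"fmt"
		"net/netip"
	)

	// prefixWithin reports whether child lies entirely inside parent: the child's
	// address must be contained in parent and its mask must be at least as narrow.
	func prefixWithin(parent, child netip.Prefix) bool {
		return parent.Contains(child.Addr()) && child.Bits() >= parent.Bits()
	}

	func main() {
		ca := netip.MustParsePrefix("10.0.0.0/16")
		fmt.Println(prefixWithin(ca, netip.MustParsePrefix("10.0.1.0/24"))) // true: ip and mask are within the network
		fmt.Println(prefixWithin(ca, netip.MustParsePrefix("10.0.1.0/15"))) // false: ip is within but the mask is outside
		fmt.Println(prefixWithin(ca, netip.MustParsePrefix("10.1.0.0/24"))) // false: ip is outside the network
	}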
cert/cert.go (1130 lines changed)
File diff suppressed because it is too large
@@ -1,8 +1,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.30.0
 // protoc v3.21.5
-// source: cert_v1.proto
+// source: cert.proto

 package cert

@@ -50,11 +50,11 @@ func (x Curve) String() string {
 }

 func (Curve) Descriptor() protoreflect.EnumDescriptor {
-	return file_cert_v1_proto_enumTypes[0].Descriptor()
+	return file_cert_proto_enumTypes[0].Descriptor()
 }

 func (Curve) Type() protoreflect.EnumType {
-	return &file_cert_v1_proto_enumTypes[0]
+	return &file_cert_proto_enumTypes[0]
 }

 func (x Curve) Number() protoreflect.EnumNumber {
@@ -63,7 +63,7 @@ func (x Curve) Number() protoreflect.EnumNumber {

 // Deprecated: Use Curve.Descriptor instead.
 func (Curve) EnumDescriptor() ([]byte, []int) {
-	return file_cert_v1_proto_rawDescGZIP(), []int{0}
+	return file_cert_proto_rawDescGZIP(), []int{0}
 }

 type RawNebulaCertificate struct {
@@ -78,7 +78,7 @@ type RawNebulaCertificate struct {
 func (x *RawNebulaCertificate) Reset() {
 	*x = RawNebulaCertificate{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_cert_v1_proto_msgTypes[0]
+		mi := &file_cert_proto_msgTypes[0]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -91,7 +91,7 @@ func (x *RawNebulaCertificate) String() string {
 func (*RawNebulaCertificate) ProtoMessage() {}

 func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {
-	mi := &file_cert_v1_proto_msgTypes[0]
+	mi := &file_cert_proto_msgTypes[0]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -104,7 +104,7 @@ func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {

 // Deprecated: Use RawNebulaCertificate.ProtoReflect.Descriptor instead.
 func (*RawNebulaCertificate) Descriptor() ([]byte, []int) {
-	return file_cert_v1_proto_rawDescGZIP(), []int{0}
+	return file_cert_proto_rawDescGZIP(), []int{0}
 }

 func (x *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
@@ -143,7 +143,7 @@ type RawNebulaCertificateDetails struct {
 func (x *RawNebulaCertificateDetails) Reset() {
 	*x = RawNebulaCertificateDetails{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_cert_v1_proto_msgTypes[1]
+		mi := &file_cert_proto_msgTypes[1]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -156,7 +156,7 @@ func (x *RawNebulaCertificateDetails) String() string {
 func (*RawNebulaCertificateDetails) ProtoMessage() {}

 func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {
-	mi := &file_cert_v1_proto_msgTypes[1]
+	mi := &file_cert_proto_msgTypes[1]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -169,7 +169,7 @@ func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {

 // Deprecated: Use RawNebulaCertificateDetails.ProtoReflect.Descriptor instead.
 func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) {
-	return file_cert_v1_proto_rawDescGZIP(), []int{1}
+	return file_cert_proto_rawDescGZIP(), []int{1}
 }

 func (x *RawNebulaCertificateDetails) GetName() string {
@@ -254,7 +254,7 @@ type RawNebulaEncryptedData struct {
 func (x *RawNebulaEncryptedData) Reset() {
 	*x = RawNebulaEncryptedData{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_cert_v1_proto_msgTypes[2]
+		mi := &file_cert_proto_msgTypes[2]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -267,7 +267,7 @@ func (x *RawNebulaEncryptedData) String() string {
 func (*RawNebulaEncryptedData) ProtoMessage() {}

 func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message {
-	mi := &file_cert_v1_proto_msgTypes[2]
+	mi := &file_cert_proto_msgTypes[2]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -280,7 +280,7 @@ func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message {

 // Deprecated: Use RawNebulaEncryptedData.ProtoReflect.Descriptor instead.
 func (*RawNebulaEncryptedData) Descriptor() ([]byte, []int) {
-	return file_cert_v1_proto_rawDescGZIP(), []int{2}
+	return file_cert_proto_rawDescGZIP(), []int{2}
 }

 func (x *RawNebulaEncryptedData) GetEncryptionMetadata() *RawNebulaEncryptionMetadata {
@@ -309,7 +309,7 @@ type RawNebulaEncryptionMetadata struct {
 func (x *RawNebulaEncryptionMetadata) Reset() {
 	*x = RawNebulaEncryptionMetadata{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_cert_v1_proto_msgTypes[3]
+		mi := &file_cert_proto_msgTypes[3]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -322,7 +322,7 @@ func (x *RawNebulaEncryptionMetadata) String() string {
 func (*RawNebulaEncryptionMetadata) ProtoMessage() {}

 func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {
-	mi := &file_cert_v1_proto_msgTypes[3]
+	mi := &file_cert_proto_msgTypes[3]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -335,7 +335,7 @@ func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {

 // Deprecated: Use RawNebulaEncryptionMetadata.ProtoReflect.Descriptor instead.
 func (*RawNebulaEncryptionMetadata) Descriptor() ([]byte, []int) {
-	return file_cert_v1_proto_rawDescGZIP(), []int{3}
+	return file_cert_proto_rawDescGZIP(), []int{3}
 }

 func (x *RawNebulaEncryptionMetadata) GetEncryptionAlgorithm() string {
@@ -367,7 +367,7 @@ type RawNebulaArgon2Parameters struct {
 func (x *RawNebulaArgon2Parameters) Reset() {
 	*x = RawNebulaArgon2Parameters{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_cert_v1_proto_msgTypes[4]
+		mi := &file_cert_proto_msgTypes[4]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -380,7 +380,7 @@ func (x *RawNebulaArgon2Parameters) String() string {
 func (*RawNebulaArgon2Parameters) ProtoMessage() {}

 func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {
-	mi := &file_cert_v1_proto_msgTypes[4]
+	mi := &file_cert_proto_msgTypes[4]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -393,7 +393,7 @@ func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {

 // Deprecated: Use RawNebulaArgon2Parameters.ProtoReflect.Descriptor instead.
 func (*RawNebulaArgon2Parameters) Descriptor() ([]byte, []int) {
-	return file_cert_v1_proto_rawDescGZIP(), []int{4}
+	return file_cert_proto_rawDescGZIP(), []int{4}
 }

 func (x *RawNebulaArgon2Parameters) GetVersion() int32 {
@@ -431,87 +431,87 @@ func (x *RawNebulaArgon2Parameters) GetSalt() []byte {
 	return nil
 }

-var File_cert_v1_proto protoreflect.FileDescriptor
+var File_cert_proto protoreflect.FileDescriptor

-var file_cert_v1_proto_rawDesc = []byte{
-	0x0a, 0x0d, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x76, 0x31, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	[remaining regenerated raw descriptor bytes for cert_v1.proto not reproduced here]
+var file_cert_proto_rawDesc = []byte{
+	0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x65,
+	[remaining raw descriptor bytes for cert.proto not reproduced here]
 }

 var (
-	file_cert_v1_proto_rawDescOnce sync.Once
-	file_cert_v1_proto_rawDescData = file_cert_v1_proto_rawDesc
+	file_cert_proto_rawDescOnce sync.Once
+	file_cert_proto_rawDescData = file_cert_proto_rawDesc
 )

-func file_cert_v1_proto_rawDescGZIP() []byte {
-	file_cert_v1_proto_rawDescOnce.Do(func() {
-		file_cert_v1_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_v1_proto_rawDescData)
+func file_cert_proto_rawDescGZIP() []byte {
+	file_cert_proto_rawDescOnce.Do(func() {
+		file_cert_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_proto_rawDescData)
 	})
-	return file_cert_v1_proto_rawDescData
+	return file_cert_proto_rawDescData
 }

-var file_cert_v1_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_cert_v1_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
-var file_cert_v1_proto_goTypes = []any{
+var file_cert_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cert_proto_goTypes = []interface{}{
 	(Curve)(0), // 0: cert.Curve
 	(*RawNebulaCertificate)(nil), // 1: cert.RawNebulaCertificate
 	(*RawNebulaCertificateDetails)(nil), // 2: cert.RawNebulaCertificateDetails
@@ -519,7 +519,7 @@ var file_cert_v1_proto_goTypes = []any{
 	(*RawNebulaEncryptionMetadata)(nil), // 4: cert.RawNebulaEncryptionMetadata
 	(*RawNebulaArgon2Parameters)(nil), // 5: cert.RawNebulaArgon2Parameters
 }
-var file_cert_v1_proto_depIdxs = []int32{
+var file_cert_proto_depIdxs = []int32{
 	2, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
 	0, // 1: cert.RawNebulaCertificateDetails.curve:type_name -> cert.Curve
 	4, // 2: cert.RawNebulaEncryptedData.EncryptionMetadata:type_name -> cert.RawNebulaEncryptionMetadata
@@ -531,13 +531,13 @@ var file_cert_v1_proto_depIdxs = []int32{
 	0, // [0:4] is the sub-list for field type_name
 }

-func init() { file_cert_v1_proto_init() }
-func file_cert_v1_proto_init() {
-	if File_cert_v1_proto != nil {
+func init() { file_cert_proto_init() }
+func file_cert_proto_init() {
+	if File_cert_proto != nil {
 		return
 	}
 	if !protoimpl.UnsafeEnabled {
-		file_cert_v1_proto_msgTypes[0].Exporter = func(v any, i int) any {
+		file_cert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*RawNebulaCertificate); i {
 			case 0:
 				return &v.state
@@ -549,7 +549,7 @@ func file_cert_v1_proto_init() {
 				return nil
 			}
 		}
-		file_cert_v1_proto_msgTypes[1].Exporter = func(v any, i int) any {
+		file_cert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*RawNebulaCertificateDetails); i {
 			case 0:
 				return &v.state
@@ -561,7 +561,7 @@ func file_cert_v1_proto_init() {
 				return nil
 			}
 		}
-		file_cert_v1_proto_msgTypes[2].Exporter = func(v any, i int) any {
+		file_cert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*RawNebulaEncryptedData); i {
 			case 0:
 				return &v.state
@@ -573,7 +573,7 @@ func file_cert_v1_proto_init() {
 				return nil
 			}
 		}
-		file_cert_v1_proto_msgTypes[3].Exporter = func(v any, i int) any {
+		file_cert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*RawNebulaEncryptionMetadata); i {
 			case 0:
 				return &v.state
@@ -585,7 +585,7 @@ func file_cert_v1_proto_init() {
 				return nil
 			}
 		}
-		file_cert_v1_proto_msgTypes[4].Exporter = func(v any, i int) any {
+		file_cert_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*RawNebulaArgon2Parameters); i {
 			case 0:
 				return &v.state
@@ -602,19 +602,19 @@ func file_cert_v1_proto_init() {
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_cert_v1_proto_rawDesc,
+			RawDescriptor: file_cert_proto_rawDesc,
 			NumEnums: 1,
 			NumMessages: 5,
 			NumExtensions: 0,
 			NumServices: 0,
 		},
-		GoTypes: file_cert_v1_proto_goTypes,
-		DependencyIndexes: file_cert_v1_proto_depIdxs,
-		EnumInfos: file_cert_v1_proto_enumTypes,
-		MessageInfos: file_cert_v1_proto_msgTypes,
+		GoTypes: file_cert_proto_goTypes,
+		DependencyIndexes: file_cert_proto_depIdxs,
+		EnumInfos: file_cert_proto_enumTypes,
+		MessageInfos: file_cert_proto_msgTypes,
 	}.Build()
-	File_cert_v1_proto = out.File
-	file_cert_v1_proto_rawDesc = nil
-	file_cert_v1_proto_goTypes = nil
-	file_cert_v1_proto_depIdxs = nil
+	File_cert_proto = out.File
+	file_cert_proto_rawDesc = nil
+	file_cert_proto_goTypes = nil
+	file_cert_proto_depIdxs = nil
 }
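One change repeated throughout the generated code above is the exporter signature switching between interface{} and any. This is purely cosmetic: any has been an alias for interface{} since Go 1.18, so both forms declare the same type. A small illustrative sketch, not part of the generated file:

	package main

	import "fmt"

	func describeOld(v interface{}) string { return fmt.Sprintf("%T", v) }
	func describeNew(v any) string         { return fmt.Sprintf("%T", v) }

	func main() {
		// Both functions have the identical type func(interface{}) string.
		fmt.Println(describeOld(42), describeNew(42)) // int int
	}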
cert/cert_test.go (1251 lines, Normal file)
File diff suppressed because it is too large

cert/cert_v1.go (489 lines)
@@ -1,489 +0,0 @@
package cert

import (
	"bytes"
	"crypto/ecdh"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"net"
	"net/netip"
	"time"

	"golang.org/x/crypto/curve25519"
	"google.golang.org/protobuf/proto"
)

const publicKeyLen = 32

type certificateV1 struct {
	details   detailsV1
	signature []byte
}

type detailsV1 struct {
	name           string
	networks       []netip.Prefix
	unsafeNetworks []netip.Prefix
	groups         []string
	notBefore      time.Time
	notAfter       time.Time
	publicKey      []byte
	isCA           bool
	issuer         string

	curve Curve
}

type m = map[string]any

func (c *certificateV1) Version() Version {
	return Version1
}

func (c *certificateV1) Curve() Curve {
	return c.details.curve
}

func (c *certificateV1) Groups() []string {
	return c.details.groups
}

func (c *certificateV1) IsCA() bool {
	return c.details.isCA
}

func (c *certificateV1) Issuer() string {
	return c.details.issuer
}

func (c *certificateV1) Name() string {
	return c.details.name
}

func (c *certificateV1) Networks() []netip.Prefix {
	return c.details.networks
}

func (c *certificateV1) NotAfter() time.Time {
	return c.details.notAfter
}

func (c *certificateV1) NotBefore() time.Time {
	return c.details.notBefore
}

func (c *certificateV1) PublicKey() []byte {
	return c.details.publicKey
}

func (c *certificateV1) Signature() []byte {
	return c.signature
}

func (c *certificateV1) UnsafeNetworks() []netip.Prefix {
	return c.details.unsafeNetworks
}

func (c *certificateV1) Fingerprint() (string, error) {
	b, err := c.Marshal()
	if err != nil {
		return "", err
	}

	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}

func (c *certificateV1) CheckSignature(key []byte) bool {
	b, err := proto.Marshal(c.getRawDetails())
	if err != nil {
		return false
	}
	switch c.details.curve {
	case Curve_CURVE25519:
		return ed25519.Verify(key, b, c.signature)
	case Curve_P256:
		x, y := elliptic.Unmarshal(elliptic.P256(), key)
		pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
		hashed := sha256.Sum256(b)
		return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
	default:
		return false
	}
}

func (c *certificateV1) Expired(t time.Time) bool {
	return c.details.notBefore.After(t) || c.details.notAfter.Before(t)
}

func (c *certificateV1) VerifyPrivateKey(curve Curve, key []byte) error {
	if curve != c.details.curve {
		return fmt.Errorf("curve in cert and private key supplied don't match")
	}
	if c.details.isCA {
		switch curve {
		case Curve_CURVE25519:
			// the call to PublicKey below will panic slice bounds out of range otherwise
			if len(key) != ed25519.PrivateKeySize {
				return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
			}

			if !ed25519.PublicKey(c.details.publicKey).Equal(ed25519.PrivateKey(key).Public()) {
				return fmt.Errorf("public key in cert and private key supplied don't match")
			}
		case Curve_P256:
			privkey, err := ecdh.P256().NewPrivateKey(key)
			if err != nil {
				return fmt.Errorf("cannot parse private key as P256: %w", err)
			}
			pub := privkey.PublicKey().Bytes()
			if !bytes.Equal(pub, c.details.publicKey) {
				return fmt.Errorf("public key in cert and private key supplied don't match")
			}
		default:
			return fmt.Errorf("invalid curve: %s", curve)
		}
		return nil
	}

	var pub []byte
	switch curve {
	case Curve_CURVE25519:
		var err error
		pub, err = curve25519.X25519(key, curve25519.Basepoint)
		if err != nil {
			return err
		}
	case Curve_P256:
		privkey, err := ecdh.P256().NewPrivateKey(key)
		if err != nil {
			return err
		}
		pub = privkey.PublicKey().Bytes()
	default:
		return fmt.Errorf("invalid curve: %s", curve)
	}
	if !bytes.Equal(pub, c.details.publicKey) {
		return fmt.Errorf("public key in cert and private key supplied don't match")
	}

	return nil
}

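For the non-CA CURVE25519 branch above, VerifyPrivateKey derives the public key from the supplied private key and compares it to the key stored in the certificate. The following is a minimal standalone sketch of that underlying check, assuming an X25519 private key and the cert's stored public key; it is not the cert package's API.

	package main

	import (
		"bytes"
		"fmt"

		"golang.org/x/crypto/curve25519"
	)

	// keyMatchesCert derives the X25519 public key for privKey and compares it
	// to the public key carried in the certificate.
	func keyMatchesCert(privKey, certPubKey []byte) (bool, error) {
		pub, err := curve25519.X25519(privKey, curve25519.Basepoint)
		if err != nil {
			return false, err
		}
		return bytes.Equal(pub, certPubKey), nil
	}

	func main() {
		priv := bytes.Repeat([]byte{0x42}, 32) // placeholder key material, illustration only
		pub, _ := curve25519.X25519(priv, curve25519.Basepoint)
		ok, _ := keyMatchesCert(priv, pub)
		fmt.Println(ok) // true
	}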
// getRawDetails marshals the raw details into protobuf ready struct
func (c *certificateV1) getRawDetails() *RawNebulaCertificateDetails {
	rd := &RawNebulaCertificateDetails{
		Name:      c.details.name,
		Groups:    c.details.groups,
		NotBefore: c.details.notBefore.Unix(),
		NotAfter:  c.details.notAfter.Unix(),
		PublicKey: make([]byte, len(c.details.publicKey)),
		IsCA:      c.details.isCA,
		Curve:     c.details.curve,
	}

	for _, ipNet := range c.details.networks {
		mask := net.CIDRMask(ipNet.Bits(), ipNet.Addr().BitLen())
		rd.Ips = append(rd.Ips, addr2int(ipNet.Addr()), ip2int(mask))
	}

	for _, ipNet := range c.details.unsafeNetworks {
		mask := net.CIDRMask(ipNet.Bits(), ipNet.Addr().BitLen())
		rd.Subnets = append(rd.Subnets, addr2int(ipNet.Addr()), ip2int(mask))
	}

	copy(rd.PublicKey, c.details.publicKey[:])

	// I know, this is terrible
	rd.Issuer, _ = hex.DecodeString(c.details.issuer)

	return rd
}

func (c *certificateV1) String() string {
	b, err := json.MarshalIndent(c.marshalJSON(), "", "\t")
	if err != nil {
		return fmt.Sprintf("<error marshalling certificate: %v>", err)
	}
	return string(b)
}

func (c *certificateV1) MarshalForHandshakes() ([]byte, error) {
	pubKey := c.details.publicKey
	c.details.publicKey = nil
	rawCertNoKey, err := c.Marshal()
	if err != nil {
		return nil, err
	}
	c.details.publicKey = pubKey
	return rawCertNoKey, nil
}

func (c *certificateV1) Marshal() ([]byte, error) {
	rc := RawNebulaCertificate{
		Details:   c.getRawDetails(),
		Signature: c.signature,
	}

	return proto.Marshal(&rc)
}

func (c *certificateV1) MarshalPEM() ([]byte, error) {
	b, err := c.Marshal()
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: CertificateBanner, Bytes: b}), nil
}

func (c *certificateV1) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.marshalJSON())
}

func (c *certificateV1) marshalJSON() m {
	fp, _ := c.Fingerprint()
	return m{
		"version": Version1,
		"details": m{
			"name":           c.details.name,
			"networks":       c.details.networks,
			"unsafeNetworks": c.details.unsafeNetworks,
			"groups":         c.details.groups,
			"notBefore":      c.details.notBefore,
			"notAfter":       c.details.notAfter,
			"publicKey":      fmt.Sprintf("%x", c.details.publicKey),
			"isCa":           c.details.isCA,
			"issuer":         c.details.issuer,
			"curve":          c.details.curve.String(),
		},
		"fingerprint": fp,
		"signature":   fmt.Sprintf("%x", c.Signature()),
	}
}

func (c *certificateV1) Copy() Certificate {
	nc := &certificateV1{
		details: detailsV1{
			name:      c.details.name,
			notBefore: c.details.notBefore,
			notAfter:  c.details.notAfter,
			publicKey: make([]byte, len(c.details.publicKey)),
			isCA:      c.details.isCA,
			issuer:    c.details.issuer,
			curve:     c.details.curve,
		},
		signature: make([]byte, len(c.signature)),
	}

	if c.details.groups != nil {
		nc.details.groups = make([]string, len(c.details.groups))
		copy(nc.details.groups, c.details.groups)
	}

	if c.details.networks != nil {
		nc.details.networks = make([]netip.Prefix, len(c.details.networks))
		copy(nc.details.networks, c.details.networks)
	}

	if c.details.unsafeNetworks != nil {
		nc.details.unsafeNetworks = make([]netip.Prefix, len(c.details.unsafeNetworks))
		copy(nc.details.unsafeNetworks, c.details.unsafeNetworks)
	}

	copy(nc.signature, c.signature)
	copy(nc.details.publicKey, c.details.publicKey)

	return nc
}

func (c *certificateV1) fromTBSCertificate(t *TBSCertificate) error {
	c.details = detailsV1{
		name:           t.Name,
		networks:       t.Networks,
		unsafeNetworks: t.UnsafeNetworks,
		groups:         t.Groups,
		notBefore:      t.NotBefore,
		notAfter:       t.NotAfter,
		publicKey:      t.PublicKey,
		isCA:           t.IsCA,
		curve:          t.Curve,
		issuer:         t.issuer,
	}

	return c.validate()
}

func (c *certificateV1) validate() error {
	// Empty names are allowed

	if len(c.details.publicKey) == 0 {
		return ErrInvalidPublicKey
	}

	// Original v1 rules allowed multiple networks to be present but ignored all but the first one.
	// Continue to allow this behavior
	if !c.details.isCA && len(c.details.networks) == 0 {
		return NewErrInvalidCertificateProperties("non-CA certificates must contain exactly one network")
	}

	for _, network := range c.details.networks {
		if !network.IsValid() || !network.Addr().IsValid() {
			return NewErrInvalidCertificateProperties("invalid network: %s", network)
		}

		if network.Addr().Is6() {
			return NewErrInvalidCertificateProperties("certificate may not contain IPv6 networks: %v", network)
		}

		if network.Addr().IsUnspecified() {
			return NewErrInvalidCertificateProperties("non-CA certificates must not use the zero address as a network: %s", network)
		}

		if network.Addr().Zone() != "" {
			return NewErrInvalidCertificateProperties("networks may not contain zones: %s", network)
		}
	}

	for _, network := range c.details.unsafeNetworks {
		if !network.IsValid() || !network.Addr().IsValid() {
			return NewErrInvalidCertificateProperties("invalid unsafe network: %s", network)
		}

		if network.Addr().Is6() {
			return NewErrInvalidCertificateProperties("certificate may not contain IPv6 unsafe networks: %v", network)
		}

		if network.Addr().Zone() != "" {
			return NewErrInvalidCertificateProperties("unsafe networks may not contain zones: %s", network)
		}
	}

	// v1 doesn't bother with sort order or uniqueness of networks or unsafe networks.
	// We can't modify the unmarshalled data because verification requires re-marshalling and a re-ordered
	// unsafe networks would result in a different signature.

	return nil
}

func (c *certificateV1) marshalForSigning() ([]byte, error) {
	b, err := proto.Marshal(c.getRawDetails())
	if err != nil {
		return nil, err
	}
	return b, nil
}

func (c *certificateV1) setSignature(b []byte) error {
	if len(b) == 0 {
		return ErrEmptySignature
	}
	c.signature = b
	return nil
}

// unmarshalCertificateV1 will unmarshal a protobuf byte representation of a nebula cert
// if the publicKey is provided here then it is not required to be present in `b`
func unmarshalCertificateV1(b []byte, publicKey []byte) (*certificateV1, error) {
	if len(b) == 0 {
		return nil, fmt.Errorf("nil byte array")
	}
	var rc RawNebulaCertificate
	err := proto.Unmarshal(b, &rc)
	if err != nil {
		return nil, err
	}

	if rc.Details == nil {
		return nil, fmt.Errorf("encoded Details was nil")
	}

	if len(rc.Details.Ips)%2 != 0 {
		return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
	}

	if len(rc.Details.Subnets)%2 != 0 {
		return nil, fmt.Errorf("encoded Subnets should be in pairs, an odd number was found")
	}

	nc := certificateV1{
		details: detailsV1{
			name:           rc.Details.Name,
			groups:         make([]string, len(rc.Details.Groups)),
			networks:       make([]netip.Prefix, len(rc.Details.Ips)/2),
			unsafeNetworks: make([]netip.Prefix, len(rc.Details.Subnets)/2),
			notBefore:      time.Unix(rc.Details.NotBefore, 0),
			notAfter:       time.Unix(rc.Details.NotAfter, 0),
			publicKey:      make([]byte, len(rc.Details.PublicKey)),
			isCA:           rc.Details.IsCA,
			curve:          rc.Details.Curve,
		},
		signature: make([]byte, len(rc.Signature)),
	}

	copy(nc.signature, rc.Signature)
	copy(nc.details.groups, rc.Details.Groups)
	nc.details.issuer = hex.EncodeToString(rc.Details.Issuer)

	if len(publicKey) > 0 {
		nc.details.publicKey = publicKey
	}

	copy(nc.details.publicKey, rc.Details.PublicKey)

	var ip netip.Addr
	for i, rawIp := range rc.Details.Ips {
		if i%2 == 0 {
			ip = int2addr(rawIp)
		} else {
			ones, _ := net.IPMask(int2ip(rawIp)).Size()
			nc.details.networks[i/2] = netip.PrefixFrom(ip, ones)
		}
	}

	for i, rawIp := range rc.Details.Subnets {
		if i%2 == 0 {
			ip = int2addr(rawIp)
		} else {
			ones, _ := net.IPMask(int2ip(rawIp)).Size()
			nc.details.unsafeNetworks[i/2] = netip.PrefixFrom(ip, ones)
		}
	}

	err = nc.validate()
	if err != nil {
		return nil, err
	}

	return &nc, nil
}

func ip2int(ip []byte) uint32 {
	if len(ip) == 16 {
		return binary.BigEndian.Uint32(ip[12:16])
	}
	return binary.BigEndian.Uint32(ip)
}

func int2ip(nn uint32) net.IP {
	ip := make(net.IP, net.IPv4len)
	binary.BigEndian.PutUint32(ip, nn)
	return ip
}

func addr2int(addr netip.Addr) uint32 {
	b := addr.Unmap().As4()
	return binary.BigEndian.Uint32(b[:])
}

func int2addr(nn uint32) netip.Addr {
	ip := [4]byte{}
	binary.BigEndian.PutUint32(ip[:], nn)
	return netip.AddrFrom4(ip).Unmap()
}
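The addr2int and int2addr helpers at the end of the file encode IPv4 addresses as big-endian uint32 values for the v1 wire format. Below is a small round-trip sketch of the same encoding, standard library only, with illustrative function names rather than the package's own.

	package main

	import (
		"encoding/binary"
		"fmt"
		"net/netip"
	)

	func addrToUint32(addr netip.Addr) uint32 {
		b := addr.Unmap().As4()
		return binary.BigEndian.Uint32(b[:])
	}

	func uint32ToAddr(nn uint32) netip.Addr {
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], nn)
		return netip.AddrFrom4(b)
	}

	func main() {
		a := netip.MustParseAddr("192.168.0.1")
		n := addrToUint32(a)
		fmt.Printf("%d -> %s\n", n, uint32ToAddr(n)) // 3232235521 -> 192.168.0.1
	}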
@@ -1,218 +0,0 @@
package cert

import (
	"fmt"
	"net/netip"
	"testing"
	"time"

	"github.com/slackhq/nebula/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"
)

func TestCertificateV1_Marshal(t *testing.T) {
	before := time.Now().Add(time.Second * -60).Round(time.Second)
	after := time.Now().Add(time.Second * 60).Round(time.Second)
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	nc := certificateV1{
		details: detailsV1{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.1/24"),
				mustParsePrefixUnmapped("10.1.1.2/16"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.2/24"),
				mustParsePrefixUnmapped("9.1.1.3/16"),
			},
			groups:    []string{"test-group1", "test-group2", "test-group3"},
			notBefore: before,
			notAfter:  after,
			publicKey: pubKey,
			isCA:      false,
			issuer:    "1234567890abcedfghij1234567890ab",
		},
		signature: []byte("1234567890abcedfghij1234567890ab"),
	}

	b, err := nc.Marshal()
	require.NoError(t, err)
	//t.Log("Cert size:", len(b))

	nc2, err := unmarshalCertificateV1(b, nil)
	require.NoError(t, err)

	assert.Equal(t, Version1, nc.Version())
	assert.Equal(t, Curve_CURVE25519, nc.Curve())
	assert.Equal(t, nc.Signature(), nc2.Signature())
	assert.Equal(t, nc.Name(), nc2.Name())
	assert.Equal(t, nc.NotBefore(), nc2.NotBefore())
	assert.Equal(t, nc.NotAfter(), nc2.NotAfter())
	assert.Equal(t, nc.PublicKey(), nc2.PublicKey())
	assert.Equal(t, nc.IsCA(), nc2.IsCA())

	assert.Equal(t, nc.Networks(), nc2.Networks())
	assert.Equal(t, nc.UnsafeNetworks(), nc2.UnsafeNetworks())

	assert.Equal(t, nc.Groups(), nc2.Groups())
}

func TestCertificateV1_Expired(t *testing.T) {
	nc := certificateV1{
		details: detailsV1{
			notBefore: time.Now().Add(time.Second * -60).Round(time.Second),
			notAfter:  time.Now().Add(time.Second * 60).Round(time.Second),
		},
	}

	assert.True(t, nc.Expired(time.Now().Add(time.Hour)))
	assert.True(t, nc.Expired(time.Now().Add(-time.Hour)))
	assert.False(t, nc.Expired(time.Now()))
}

func TestCertificateV1_MarshalJSON(t *testing.T) {
	time.Local = time.UTC
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	nc := certificateV1{
		details: detailsV1{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.1/24"),
				mustParsePrefixUnmapped("10.1.1.2/16"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.2/24"),
				mustParsePrefixUnmapped("9.1.1.3/16"),
			},
			groups:    []string{"test-group1", "test-group2", "test-group3"},
			notBefore: time.Date(1, 0, 0, 1, 0, 0, 0, time.UTC),
			notAfter:  time.Date(1, 0, 0, 2, 0, 0, 0, time.UTC),
			publicKey: pubKey,
			isCA:      false,
			issuer:    "1234567890abcedfghij1234567890ab",
		},
		signature: []byte("1234567890abcedfghij1234567890ab"),
	}

	b, err := nc.MarshalJSON()
	require.NoError(t, err)
	assert.JSONEq(
		t,
		"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"networks\":[\"10.1.1.1/24\",\"10.1.1.2/16\"],\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"unsafeNetworks\":[\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"3944c53d4267a229295b56cb2d27d459164c010ac97d655063ba421e0670f4ba\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"version\":1}",
		string(b),
	)
}

func TestCertificateV1_VerifyPrivateKey(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
	err := ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
	require.NoError(t, err)

	_, _, caKey2, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
	require.NoError(t, err)
	err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
	require.Error(t, err)

	c, _, priv, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
	require.NoError(t, err)
	assert.Empty(t, b)
	assert.Equal(t, Curve_CURVE25519, curve)
	err = c.VerifyPrivateKey(Curve_CURVE25519, rawPriv)
	require.NoError(t, err)

	_, priv2 := X25519Keypair()
	err = c.VerifyPrivateKey(Curve_CURVE25519, priv2)
	require.Error(t, err)
}
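The mismatch failure asserted above comes down to re-deriving the public key from the candidate private key and comparing it with the certificate's key. A small standalone sketch of that derivation for Curve25519 using x/crypto directly (the helper name here is illustrative, not part of the package API):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

// derivesTo reports whether priv is the X25519 private key matching pub.
func derivesTo(priv, pub []byte) bool {
	derived, err := curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		return false
	}
	return bytes.Equal(derived, pub)
}

func main() {
	priv := make([]byte, 32)
	if _, err := rand.Read(priv); err != nil {
		panic(err)
	}
	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	fmt.Println(derivesTo(priv, pub)) // true
}
```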
func TestCertificateV1_VerifyPrivateKeyP256(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
	err := ca.VerifyPrivateKey(Curve_P256, caKey)
	require.NoError(t, err)

	_, _, caKey2, _ := NewTestCaCert(Version1, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
	require.NoError(t, err)
	err = ca.VerifyPrivateKey(Curve_P256, caKey2)
	require.Error(t, err)

	c, _, priv, _ := NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
	require.NoError(t, err)
	assert.Empty(t, b)
	assert.Equal(t, Curve_P256, curve)
	err = c.VerifyPrivateKey(Curve_P256, rawPriv)
	require.NoError(t, err)

	_, priv2 := P256Keypair()
	err = c.VerifyPrivateKey(Curve_P256, priv2)
	require.Error(t, err)
}
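The P-256 variant relies on the same idea: re-derive the public point from the private scalar and compare. A minimal standalone sketch with crypto/ecdh (not the package's own code):

```go
package main

import (
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// The certificate stores the uncompressed SEC1 point; a verify step can
	// re-derive it from the private scalar and compare byte-for-byte.
	pub := priv.PublicKey().Bytes()
	fmt.Println(len(priv.Bytes()), len(pub)) // 32 65
}
```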
// Ensure that upgrading the protobuf library does not change how certificates
|
|
||||||
// are marshalled, since this would break signature verification
|
|
||||||
func TestMarshalingCertificateV1Consistency(t *testing.T) {
|
|
||||||
before := time.Date(1970, time.January, 1, 1, 1, 1, 1, time.UTC)
|
|
||||||
after := time.Date(9999, time.January, 1, 1, 1, 1, 1, time.UTC)
|
|
||||||
pubKey := []byte("1234567890abcedfghij1234567890ab")
|
|
||||||
|
|
||||||
nc := certificateV1{
|
|
||||||
details: detailsV1{
|
|
||||||
name: "testing",
|
|
||||||
networks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("10.1.1.2/16"),
|
|
||||||
mustParsePrefixUnmapped("10.1.1.1/24"),
|
|
||||||
},
|
|
||||||
unsafeNetworks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("9.1.1.3/16"),
|
|
||||||
mustParsePrefixUnmapped("9.1.1.2/24"),
|
|
||||||
},
|
|
||||||
groups: []string{"test-group1", "test-group2", "test-group3"},
|
|
||||||
notBefore: before,
|
|
||||||
notAfter: after,
|
|
||||||
publicKey: pubKey,
|
|
||||||
isCA: false,
|
|
||||||
issuer: "1234567890abcedfghij1234567890ab",
|
|
||||||
},
|
|
||||||
signature: []byte("1234567890abcedfghij1234567890ab"),
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := nc.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "0a8e010a0774657374696e671212828284508080fcff0f8182845080feffff0f1a12838284488080fcff0f8282844880feffff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328cd1c30cdb8ccf0af073a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf1220313233343536373839306162636564666768696a313233343536373839306162", fmt.Sprintf("%x", b))
|
|
||||||
|
|
||||||
b, err = proto.Marshal(nc.getRawDetails())
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "0a0774657374696e671212828284508080fcff0f8182845080feffff0f1a12838284488080fcff0f8282844880feffff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328cd1c30cdb8ccf0af073a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV1_Copy(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
	c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
	cc := c.Copy()
	test.AssertDeepCopyEqual(t, c, cc)
}

func TestUnmarshalCertificateV1(t *testing.T) {
	// Test that we don't panic with an invalid certificate (#332)
	data := []byte("\x98\x00\x00")
	_, err := unmarshalCertificateV1(data, nil)
	require.EqualError(t, err, "encoded Details was nil")
}

func appendByteSlices(b ...[]byte) []byte {
	retSlice := []byte{}
	for _, v := range b {
		retSlice = append(retSlice, v...)
	}
	return retSlice
}

func mustParsePrefixUnmapped(s string) netip.Prefix {
	prefix := netip.MustParsePrefix(s)
	return netip.PrefixFrom(prefix.Addr().Unmap(), prefix.Bits())
}
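The Unmap call in mustParsePrefixUnmapped matters because netip treats an IPv4 address and its 4-in-6 mapped form as distinct values. A short standalone illustration:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	mapped := netip.MustParseAddr("::ffff:10.1.1.1") // 4-in-6 form
	plain := netip.MustParseAddr("10.1.1.1")         // plain IPv4

	fmt.Println(mapped == plain)              // false: different internal forms
	fmt.Println(mapped.Is4In6(), plain.Is4()) // true true
	fmt.Println(mapped.Unmap() == plain)      // true: Unmap normalizes to IPv4
}
```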
@@ -1,37 +0,0 @@
Nebula DEFINITIONS AUTOMATIC TAGS ::= BEGIN

Name ::= UTF8String (SIZE (1..253))
Time ::= INTEGER (0..18446744073709551615) -- Seconds since unix epoch, uint64 maximum
Network ::= OCTET STRING (SIZE (5,17)) -- IP addresses are 4 or 16 bytes + 1 byte for the prefix length
Curve ::= ENUMERATED {
    curve25519 (0),
    p256 (1)
}

-- The maximum size of a certificate must not exceed 65536 bytes
Certificate ::= SEQUENCE {
    details OCTET STRING,
    curve Curve DEFAULT curve25519,
    publicKey OCTET STRING,
    -- signature(details + curve + publicKey) using the appropriate method for curve
    signature OCTET STRING
}

Details ::= SEQUENCE {
    name Name,

    -- At least 1 ipv4 or ipv6 address must be present if isCA is false
    networks SEQUENCE OF Network OPTIONAL,
    unsafeNetworks SEQUENCE OF Network OPTIONAL,
    groups SEQUENCE OF Name OPTIONAL,
    isCA BOOLEAN DEFAULT false,
    notBefore Time,
    notAfter Time,

    -- issuer is only required if isCA is false, if isCA is true then it must not be present
    issuer OCTET STRING OPTIONAL,
    ...
    -- New fields can be added below here
}

END
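The Network type above (4 or 16 address bytes plus one prefix-length byte) lines up with what netip.Prefix.MarshalBinary produces, which is how the Go marshaller later in this diff encodes networks. A quick standalone check:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	v4, _ := netip.MustParsePrefix("10.1.1.0/24").MarshalBinary()
	v6, _ := netip.MustParsePrefix("fd00::/8").MarshalBinary()

	// 4 or 16 address bytes followed by one prefix-length byte,
	// matching Network ::= OCTET STRING (SIZE (5,17)).
	fmt.Println(len(v4), len(v6)) // 5 17
	fmt.Printf("% x\n", v4)       // 0a 01 01 00 18
}
```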
cert/cert_v2.go (730 lines removed)
@@ -1,730 +0,0 @@
package cert

import (
	"bytes"
	"crypto/ecdh"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"net/netip"
	"slices"
	"time"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
	"golang.org/x/crypto/curve25519"
)

const (
	classConstructed     = 0x20
	classContextSpecific = 0x80

	TagCertDetails   = 0 | classConstructed | classContextSpecific
	TagCertCurve     = 1 | classContextSpecific
	TagCertPublicKey = 2 | classContextSpecific
	TagCertSignature = 3 | classContextSpecific

	TagDetailsName           = 0 | classContextSpecific
	TagDetailsNetworks       = 1 | classConstructed | classContextSpecific
	TagDetailsUnsafeNetworks = 2 | classConstructed | classContextSpecific
	TagDetailsGroups         = 3 | classConstructed | classContextSpecific
	TagDetailsIsCA           = 4 | classContextSpecific
	TagDetailsNotBefore      = 5 | classContextSpecific
	TagDetailsNotAfter       = 6 | classContextSpecific
	TagDetailsIssuer         = 7 | classContextSpecific
)

const (
	// MaxCertificateSize is the maximum length a valid certificate can be
	MaxCertificateSize = 65536

	// MaxNameLength is limited to a maximum realistic DNS domain name to help facilitate DNS systems
	MaxNameLength = 253

	// MaxNetworkLength is the maximum length a network value can be.
	// 16 bytes for an ipv6 address + 1 byte for the prefix length
	MaxNetworkLength = 17
)
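A quick way to sanity-check these tag constants is to print the raw identifier byte each expression resolves to; for example, the [1] constructed, context-specific tag used for networks comes out as 0xa1, which also appears in the marshalling test vectors later in this diff. A small standalone check (constants copied from the block above):

```go
package main

import "fmt"

const (
	classConstructed     = 0x20
	classContextSpecific = 0x80
)

func main() {
	// [1] constructed, context-specific (the networks field): 0x01 | 0x20 | 0x80
	fmt.Printf("%#x\n", 1|classConstructed|classContextSpecific) // 0xa1
	// [5] primitive, context-specific (notBefore): 0x05 | 0x80
	fmt.Printf("%#x\n", 5|classContextSpecific) // 0x85
}
```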
type certificateV2 struct {
	details detailsV2

	// RawDetails contains the entire asn.1 DER encoded Details struct
	// This is to benefit forwards compatibility in signature checking.
	// signature(RawDetails + Curve + PublicKey) == Signature
	rawDetails []byte
	curve      Curve
	publicKey  []byte
	signature  []byte
}

type detailsV2 struct {
	name           string
	networks       []netip.Prefix // MUST BE SORTED
	unsafeNetworks []netip.Prefix // MUST BE SORTED
	groups         []string
	isCA           bool
	notBefore      time.Time
	notAfter       time.Time
	issuer         string
}
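The MUST BE SORTED fields are kept in order with slices.SortFunc and a prefix comparator (comparePrefix, defined elsewhere in the package and not shown in this hunk). A sketch of such a comparator, assuming address-then-prefix-length ordering:

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

// lessPrefix is an illustrative comparator in the spirit of comparePrefix
// (the real implementation lives elsewhere in the package): order by
// address first, then by prefix length.
func lessPrefix(a, b netip.Prefix) int {
	if c := a.Addr().Compare(b.Addr()); c != 0 {
		return c
	}
	return a.Bits() - b.Bits()
}

func main() {
	networks := []netip.Prefix{
		netip.MustParsePrefix("10.1.0.0/16"),
		netip.MustParsePrefix("10.0.0.0/8"),
	}
	slices.SortFunc(networks, lessPrefix)
	fmt.Println(networks) // [10.0.0.0/8 10.1.0.0/16]
}
```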
func (c *certificateV2) Version() Version {
|
|
||||||
return Version2
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Curve() Curve {
|
|
||||||
return c.curve
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Groups() []string {
|
|
||||||
return c.details.groups
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) IsCA() bool {
|
|
||||||
return c.details.isCA
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Issuer() string {
|
|
||||||
return c.details.issuer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Name() string {
|
|
||||||
return c.details.name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Networks() []netip.Prefix {
|
|
||||||
return c.details.networks
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) NotAfter() time.Time {
|
|
||||||
return c.details.notAfter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) NotBefore() time.Time {
|
|
||||||
return c.details.notBefore
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) PublicKey() []byte {
|
|
||||||
return c.publicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Signature() []byte {
|
|
||||||
return c.signature
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) UnsafeNetworks() []netip.Prefix {
|
|
||||||
return c.details.unsafeNetworks
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Fingerprint() (string, error) {
	if len(c.rawDetails) == 0 {
		return "", ErrMissingDetails
	}

	b := make([]byte, len(c.rawDetails)+1+len(c.publicKey)+len(c.signature))
	copy(b, c.rawDetails)
	b[len(c.rawDetails)] = byte(c.curve)
	copy(b[len(c.rawDetails)+1:], c.publicKey)
	copy(b[len(c.rawDetails)+1+len(c.publicKey):], c.signature)
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}

func (c *certificateV2) CheckSignature(key []byte) bool {
	if len(c.rawDetails) == 0 {
		return false
	}
	b := make([]byte, len(c.rawDetails)+1+len(c.publicKey))
	copy(b, c.rawDetails)
	b[len(c.rawDetails)] = byte(c.curve)
	copy(b[len(c.rawDetails)+1:], c.publicKey)

	switch c.curve {
	case Curve_CURVE25519:
		return ed25519.Verify(key, b, c.signature)
	case Curve_P256:
		x, y := elliptic.Unmarshal(elliptic.P256(), key)
		pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
		hashed := sha256.Sum256(b)
		return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
	default:
		return false
	}
}

func (c *certificateV2) Expired(t time.Time) bool {
	return c.details.notBefore.After(t) || c.details.notAfter.Before(t)
}
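For the P256 branch above, the key is consumed as an uncompressed SEC1 point and the signature as ASN.1/DER over the SHA-256 digest of the payload. A standalone round trip using the same standard-library calls (test data only):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	payload := []byte("rawDetails || curve byte || publicKey")
	digest := sha256.Sum256(payload)

	sig, err := ecdsa.SignASN1(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}

	// Marshal and unmarshal the public key the same way CheckSignature
	// consumes it (the legacy elliptic helpers used by the code above).
	pubBytes := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
	x, y := elliptic.Unmarshal(elliptic.P256(), pubBytes)
	pub := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}

	fmt.Println(ecdsa.VerifyASN1(pub, digest[:], sig)) // true
}
```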
func (c *certificateV2) VerifyPrivateKey(curve Curve, key []byte) error {
|
|
||||||
if curve != c.curve {
|
|
||||||
return ErrPublicPrivateCurveMismatch
|
|
||||||
}
|
|
||||||
if c.details.isCA {
|
|
||||||
switch curve {
|
|
||||||
case Curve_CURVE25519:
|
|
||||||
// without this length check, the call to Public() below would panic with a slice bounds out of range error
|
|
||||||
if len(key) != ed25519.PrivateKeySize {
|
|
||||||
return ErrInvalidPrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ed25519.PublicKey(c.publicKey).Equal(ed25519.PrivateKey(key).Public()) {
|
|
||||||
return ErrPublicPrivateKeyMismatch
|
|
||||||
}
|
|
||||||
case Curve_P256:
|
|
||||||
privkey, err := ecdh.P256().NewPrivateKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return ErrInvalidPrivateKey
|
|
||||||
}
|
|
||||||
pub := privkey.PublicKey().Bytes()
|
|
||||||
if !bytes.Equal(pub, c.publicKey) {
|
|
||||||
return ErrPublicPrivateKeyMismatch
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid curve: %s", curve)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var pub []byte
|
|
||||||
switch curve {
|
|
||||||
case Curve_CURVE25519:
|
|
||||||
var err error
|
|
||||||
pub, err = curve25519.X25519(key, curve25519.Basepoint)
|
|
||||||
if err != nil {
|
|
||||||
return ErrInvalidPrivateKey
|
|
||||||
}
|
|
||||||
case Curve_P256:
|
|
||||||
privkey, err := ecdh.P256().NewPrivateKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return ErrInvalidPrivateKey
|
|
||||||
}
|
|
||||||
pub = privkey.PublicKey().Bytes()
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid curve: %s", curve)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(pub, c.publicKey) {
|
|
||||||
return ErrPublicPrivateKeyMismatch
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) String() string {
|
|
||||||
mb, err := c.marshalJSON()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Sprintf("<error marshalling certificate: %v>", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := json.MarshalIndent(mb, "", "\t")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Sprintf("<error marshalling certificate: %v>", err)
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) MarshalForHandshakes() ([]byte, error) {
|
|
||||||
if c.rawDetails == nil {
|
|
||||||
return nil, ErrEmptyRawDetails
|
|
||||||
}
|
|
||||||
var b cryptobyte.Builder
|
|
||||||
// Outermost certificate
|
|
||||||
b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {
|
|
||||||
|
|
||||||
// Add the cert details which is already marshalled
|
|
||||||
b.AddBytes(c.rawDetails)
|
|
||||||
|
|
||||||
// Skipping the curve and public key since those come across in a different part of the handshake
|
|
||||||
|
|
||||||
// Add the signature
|
|
||||||
b.AddASN1(TagCertSignature, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes(c.signature)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
return b.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Marshal() ([]byte, error) {
|
|
||||||
if c.rawDetails == nil {
|
|
||||||
return nil, ErrEmptyRawDetails
|
|
||||||
}
|
|
||||||
var b cryptobyte.Builder
|
|
||||||
// Outermost certificate
|
|
||||||
b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {
|
|
||||||
|
|
||||||
// Add the cert details which is already marshalled
|
|
||||||
b.AddBytes(c.rawDetails)
|
|
||||||
|
|
||||||
// Add the curve only if its not the default value
|
|
||||||
if c.curve != Curve_CURVE25519 {
|
|
||||||
b.AddASN1(TagCertCurve, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes([]byte{byte(c.curve)})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add the public key if it is not empty
|
|
||||||
if c.publicKey != nil {
|
|
||||||
b.AddASN1(TagCertPublicKey, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes(c.publicKey)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add the signature
|
|
||||||
b.AddASN1(TagCertSignature, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes(c.signature)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
return b.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) MarshalPEM() ([]byte, error) {
|
|
||||||
b, err := c.Marshal()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: CertificateV2Banner, Bytes: b}), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) MarshalJSON() ([]byte, error) {
|
|
||||||
b, err := c.marshalJSON()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return json.Marshal(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) marshalJSON() (m, error) {
|
|
||||||
fp, err := c.Fingerprint()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return m{
|
|
||||||
"details": m{
|
|
||||||
"name": c.details.name,
|
|
||||||
"networks": c.details.networks,
|
|
||||||
"unsafeNetworks": c.details.unsafeNetworks,
|
|
||||||
"groups": c.details.groups,
|
|
||||||
"notBefore": c.details.notBefore,
|
|
||||||
"notAfter": c.details.notAfter,
|
|
||||||
"isCa": c.details.isCA,
|
|
||||||
"issuer": c.details.issuer,
|
|
||||||
},
|
|
||||||
"version": Version2,
|
|
||||||
"publicKey": fmt.Sprintf("%x", c.publicKey),
|
|
||||||
"curve": c.curve.String(),
|
|
||||||
"fingerprint": fp,
|
|
||||||
"signature": fmt.Sprintf("%x", c.Signature()),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) Copy() Certificate {
|
|
||||||
nc := &certificateV2{
|
|
||||||
details: detailsV2{
|
|
||||||
name: c.details.name,
|
|
||||||
notBefore: c.details.notBefore,
|
|
||||||
notAfter: c.details.notAfter,
|
|
||||||
isCA: c.details.isCA,
|
|
||||||
issuer: c.details.issuer,
|
|
||||||
},
|
|
||||||
curve: c.curve,
|
|
||||||
publicKey: make([]byte, len(c.publicKey)),
|
|
||||||
signature: make([]byte, len(c.signature)),
|
|
||||||
rawDetails: make([]byte, len(c.rawDetails)),
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.details.groups != nil {
|
|
||||||
nc.details.groups = make([]string, len(c.details.groups))
|
|
||||||
copy(nc.details.groups, c.details.groups)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.details.networks != nil {
|
|
||||||
nc.details.networks = make([]netip.Prefix, len(c.details.networks))
|
|
||||||
copy(nc.details.networks, c.details.networks)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.details.unsafeNetworks != nil {
|
|
||||||
nc.details.unsafeNetworks = make([]netip.Prefix, len(c.details.unsafeNetworks))
|
|
||||||
copy(nc.details.unsafeNetworks, c.details.unsafeNetworks)
|
|
||||||
}
|
|
||||||
|
|
||||||
copy(nc.rawDetails, c.rawDetails)
|
|
||||||
copy(nc.signature, c.signature)
|
|
||||||
copy(nc.publicKey, c.publicKey)
|
|
||||||
|
|
||||||
return nc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) fromTBSCertificate(t *TBSCertificate) error {
|
|
||||||
c.details = detailsV2{
|
|
||||||
name: t.Name,
|
|
||||||
networks: t.Networks,
|
|
||||||
unsafeNetworks: t.UnsafeNetworks,
|
|
||||||
groups: t.Groups,
|
|
||||||
isCA: t.IsCA,
|
|
||||||
notBefore: t.NotBefore,
|
|
||||||
notAfter: t.NotAfter,
|
|
||||||
issuer: t.issuer,
|
|
||||||
}
|
|
||||||
c.curve = t.Curve
|
|
||||||
c.publicKey = t.PublicKey
|
|
||||||
return c.validate()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) validate() error {
|
|
||||||
// Empty names are allowed
|
|
||||||
|
|
||||||
if len(c.publicKey) == 0 {
|
|
||||||
return ErrInvalidPublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
if !c.details.isCA && len(c.details.networks) == 0 {
|
|
||||||
return NewErrInvalidCertificateProperties("non-CA certificate must contain at least 1 network")
|
|
||||||
}
|
|
||||||
|
|
||||||
hasV4Networks := false
|
|
||||||
hasV6Networks := false
|
|
||||||
for _, network := range c.details.networks {
|
|
||||||
if !network.IsValid() || !network.Addr().IsValid() {
|
|
||||||
return NewErrInvalidCertificateProperties("invalid network: %s", network)
|
|
||||||
}
|
|
||||||
|
|
||||||
if network.Addr().IsUnspecified() {
|
|
||||||
return NewErrInvalidCertificateProperties("non-CA certificates must not use the zero address as a network: %s", network)
|
|
||||||
}
|
|
||||||
|
|
||||||
if network.Addr().Zone() != "" {
|
|
||||||
return NewErrInvalidCertificateProperties("networks may not contain zones: %s", network)
|
|
||||||
}
|
|
||||||
|
|
||||||
if network.Addr().Is4In6() {
|
|
||||||
return NewErrInvalidCertificateProperties("4in6 networks are not allowed: %s", network)
|
|
||||||
}
|
|
||||||
|
|
||||||
hasV4Networks = hasV4Networks || network.Addr().Is4()
|
|
||||||
hasV6Networks = hasV6Networks || network.Addr().Is6()
|
|
||||||
}
|
|
||||||
|
|
||||||
slices.SortFunc(c.details.networks, comparePrefix)
|
|
||||||
err := findDuplicatePrefix(c.details.networks)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, network := range c.details.unsafeNetworks {
|
|
||||||
if !network.IsValid() || !network.Addr().IsValid() {
|
|
||||||
return NewErrInvalidCertificateProperties("invalid unsafe network: %s", network)
|
|
||||||
}
|
|
||||||
|
|
||||||
if network.Addr().Zone() != "" {
|
|
||||||
return NewErrInvalidCertificateProperties("unsafe networks may not contain zones: %s", network)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !c.details.isCA {
|
|
||||||
if network.Addr().Is6() {
|
|
||||||
if !hasV6Networks {
|
|
||||||
return NewErrInvalidCertificateProperties("IPv6 unsafe networks require an IPv6 address assignment: %s", network)
|
|
||||||
}
|
|
||||||
} else if network.Addr().Is4() {
|
|
||||||
if !hasV4Networks {
|
|
||||||
return NewErrInvalidCertificateProperties("IPv4 unsafe networks require an IPv4 address assignment: %s", network)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
slices.SortFunc(c.details.unsafeNetworks, comparePrefix)
|
|
||||||
err = findDuplicatePrefix(c.details.unsafeNetworks)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *certificateV2) marshalForSigning() ([]byte, error) {
	d, err := c.details.Marshal()
	if err != nil {
		return nil, fmt.Errorf("marshalling certificate details failed: %w", err)
	}
	c.rawDetails = d

	b := make([]byte, len(c.rawDetails)+1+len(c.publicKey))
	copy(b, c.rawDetails)
	b[len(c.rawDetails)] = byte(c.curve)
	copy(b[len(c.rawDetails)+1:], c.publicKey)
	return b, nil
}

func (c *certificateV2) setSignature(b []byte) error {
	if len(b) == 0 {
		return ErrEmptySignature
	}
	c.signature = b
	return nil
}
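Taken together, the in-package signing flow is roughly: build the to-be-signed bytes, sign them with the CA key, then attach the signature. An illustrative sketch for the Curve25519 case; the helper name and surrounding plumbing are assumed, not the package's actual signing entry point:

```go
// signV2 is an illustrative in-package helper, not the real signing path.
func signV2(c *certificateV2, caPriv ed25519.PrivateKey) error {
	msg, err := c.marshalForSigning() // rawDetails || curve byte || publicKey
	if err != nil {
		return err
	}
	return c.setSignature(ed25519.Sign(caPriv, msg))
}
```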
func (d *detailsV2) Marshal() ([]byte, error) {
|
|
||||||
var b cryptobyte.Builder
|
|
||||||
var err error
|
|
||||||
|
|
||||||
// Details are a structure
|
|
||||||
b.AddASN1(TagCertDetails, func(b *cryptobyte.Builder) {
|
|
||||||
|
|
||||||
// Add the name
|
|
||||||
b.AddASN1(TagDetailsName, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes([]byte(d.name))
|
|
||||||
})
|
|
||||||
|
|
||||||
// Add the networks if any exist
|
|
||||||
if len(d.networks) > 0 {
|
|
||||||
b.AddASN1(TagDetailsNetworks, func(b *cryptobyte.Builder) {
|
|
||||||
for _, n := range d.networks {
|
|
||||||
sb, innerErr := n.MarshalBinary()
|
|
||||||
if innerErr != nil {
|
|
||||||
// MarshalBinary never returns an error
|
|
||||||
err = fmt.Errorf("unable to marshal network: %w", innerErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b.AddASN1OctetString(sb)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add the unsafe networks if any exist
|
|
||||||
if len(d.unsafeNetworks) > 0 {
|
|
||||||
b.AddASN1(TagDetailsUnsafeNetworks, func(b *cryptobyte.Builder) {
|
|
||||||
for _, n := range d.unsafeNetworks {
|
|
||||||
sb, innerErr := n.MarshalBinary()
|
|
||||||
if innerErr != nil {
|
|
||||||
// MarshalBinary never returns an error
|
|
||||||
err = fmt.Errorf("unable to marshal unsafe network: %w", innerErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b.AddASN1OctetString(sb)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add groups if any exist
|
|
||||||
if len(d.groups) > 0 {
|
|
||||||
b.AddASN1(TagDetailsGroups, func(b *cryptobyte.Builder) {
|
|
||||||
for _, group := range d.groups {
|
|
||||||
b.AddASN1(asn1.UTF8String, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes([]byte(group))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add IsCA only if true
|
|
||||||
if d.isCA {
|
|
||||||
b.AddASN1(TagDetailsIsCA, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddUint8(0xff)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add not before
|
|
||||||
b.AddASN1Int64WithTag(d.notBefore.Unix(), TagDetailsNotBefore)
|
|
||||||
|
|
||||||
// Add not after
|
|
||||||
b.AddASN1Int64WithTag(d.notAfter.Unix(), TagDetailsNotAfter)
|
|
||||||
|
|
||||||
// Add the issuer if present
|
|
||||||
if d.issuer != "" {
|
|
||||||
issuerBytes, innerErr := hex.DecodeString(d.issuer)
|
|
||||||
if innerErr != nil {
|
|
||||||
err = fmt.Errorf("failed to decode issuer: %w", innerErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b.AddASN1(TagDetailsIssuer, func(b *cryptobyte.Builder) {
|
|
||||||
b.AddBytes(issuerBytes)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalCertificateV2(b []byte, publicKey []byte, curve Curve) (*certificateV2, error) {
|
|
||||||
l := len(b)
|
|
||||||
if l == 0 || l > MaxCertificateSize {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
input := cryptobyte.String(b)
|
|
||||||
// Open the envelope
|
|
||||||
if !input.ReadASN1(&input, asn1.SEQUENCE) || input.Empty() {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Grab the cert details, we need to preserve the tag and length
|
|
||||||
var rawDetails cryptobyte.String
|
|
||||||
if !input.ReadASN1Element(&rawDetails, TagCertDetails) || rawDetails.Empty() {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maybe grab the curve; fall back to the curve argument when the optional field is absent
|
|
||||||
var rawCurve byte
|
|
||||||
if !readOptionalASN1Byte(&input, &rawCurve, TagCertCurve, byte(curve)) {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
curve = Curve(rawCurve)
|
|
||||||
|
|
||||||
// Maybe grab the public key
|
|
||||||
var rawPublicKey cryptobyte.String
|
|
||||||
if len(publicKey) > 0 {
|
|
||||||
rawPublicKey = publicKey
|
|
||||||
} else if !input.ReadOptionalASN1(&rawPublicKey, nil, TagCertPublicKey) {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(rawPublicKey) == 0 {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Grab the signature
|
|
||||||
var rawSignature cryptobyte.String
|
|
||||||
if !input.ReadASN1(&rawSignature, TagCertSignature) || rawSignature.Empty() {
|
|
||||||
return nil, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finally unmarshal the details
|
|
||||||
details, err := unmarshalDetails(rawDetails)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
c := &certificateV2{
|
|
||||||
details: details,
|
|
||||||
rawDetails: rawDetails,
|
|
||||||
curve: curve,
|
|
||||||
publicKey: rawPublicKey,
|
|
||||||
signature: rawSignature,
|
|
||||||
}
|
|
||||||
|
|
||||||
err = c.validate()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalDetails(b cryptobyte.String) (detailsV2, error) {
|
|
||||||
// Open the envelope
|
|
||||||
if !b.ReadASN1(&b, TagCertDetails) || b.Empty() {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the name
|
|
||||||
var name cryptobyte.String
|
|
||||||
if !b.ReadASN1(&name, TagDetailsName) || name.Empty() || len(name) > MaxNameLength {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the network addresses
|
|
||||||
var subString cryptobyte.String
|
|
||||||
var found bool
|
|
||||||
|
|
||||||
if !b.ReadOptionalASN1(&subString, &found, TagDetailsNetworks) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
var networks []netip.Prefix
|
|
||||||
var val cryptobyte.String
|
|
||||||
if found {
|
|
||||||
for !subString.Empty() {
|
|
||||||
if !subString.ReadASN1(&val, asn1.OCTET_STRING) || val.Empty() || len(val) > MaxNetworkLength {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
var n netip.Prefix
|
|
||||||
if err := n.UnmarshalBinary(val); err != nil {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
networks = append(networks, n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read out any unsafe networks
|
|
||||||
if !b.ReadOptionalASN1(&subString, &found, TagDetailsUnsafeNetworks) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
var unsafeNetworks []netip.Prefix
|
|
||||||
if found {
|
|
||||||
for !subString.Empty() {
|
|
||||||
if !subString.ReadASN1(&val, asn1.OCTET_STRING) || val.Empty() || len(val) > MaxNetworkLength {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
var n netip.Prefix
|
|
||||||
if err := n.UnmarshalBinary(val); err != nil {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
unsafeNetworks = append(unsafeNetworks, n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read out any groups
|
|
||||||
if !b.ReadOptionalASN1(&subString, &found, TagDetailsGroups) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
var groups []string
|
|
||||||
if found {
|
|
||||||
for !subString.Empty() {
|
|
||||||
if !subString.ReadASN1(&val, asn1.UTF8String) || val.Empty() {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
groups = append(groups, string(val))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read out IsCA
|
|
||||||
var isCa bool
|
|
||||||
if !readOptionalASN1Boolean(&b, &isCa, TagDetailsIsCA, false) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read not before and not after
|
|
||||||
var notBefore int64
|
|
||||||
if !b.ReadASN1Int64WithTag(¬Before, TagDetailsNotBefore) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
var notAfter int64
|
|
||||||
if !b.ReadASN1Int64WithTag(¬After, TagDetailsNotAfter) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read issuer
|
|
||||||
var issuer cryptobyte.String
|
|
||||||
if !b.ReadOptionalASN1(&issuer, nil, TagDetailsIssuer) {
|
|
||||||
return detailsV2{}, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
return detailsV2{
|
|
||||||
name: string(name),
|
|
||||||
networks: networks,
|
|
||||||
unsafeNetworks: unsafeNetworks,
|
|
||||||
groups: groups,
|
|
||||||
isCA: isCa,
|
|
||||||
notBefore: time.Unix(notBefore, 0),
|
|
||||||
notAfter: time.Unix(notAfter, 0),
|
|
||||||
issuer: hex.EncodeToString(issuer),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
@ -1,267 +0,0 @@
|
|||||||
package cert
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/ed25519"
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"net/netip"
|
|
||||||
"slices"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/slackhq/nebula/test"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCertificateV2_Marshal(t *testing.T) {
|
|
||||||
before := time.Now().Add(time.Second * -60).Round(time.Second)
|
|
||||||
after := time.Now().Add(time.Second * 60).Round(time.Second)
|
|
||||||
pubKey := []byte("1234567890abcedfghij1234567890ab")
|
|
||||||
|
|
||||||
nc := certificateV2{
|
|
||||||
details: detailsV2{
|
|
||||||
name: "testing",
|
|
||||||
networks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("10.1.1.2/16"),
|
|
||||||
mustParsePrefixUnmapped("10.1.1.1/24"),
|
|
||||||
},
|
|
||||||
unsafeNetworks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("9.1.1.3/16"),
|
|
||||||
mustParsePrefixUnmapped("9.1.1.2/24"),
|
|
||||||
},
|
|
||||||
groups: []string{"test-group1", "test-group2", "test-group3"},
|
|
||||||
notBefore: before,
|
|
||||||
notAfter: after,
|
|
||||||
isCA: false,
|
|
||||||
issuer: "1234567890abcdef1234567890abcdef",
|
|
||||||
},
|
|
||||||
signature: []byte("1234567890abcdef1234567890abcdef"),
|
|
||||||
publicKey: pubKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
db, err := nc.details.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
nc.rawDetails = db
|
|
||||||
|
|
||||||
b, err := nc.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
//t.Log("Cert size:", len(b))
|
|
||||||
|
|
||||||
nc2, err := unmarshalCertificateV2(b, nil, Curve_CURVE25519)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, Version2, nc.Version())
|
|
||||||
assert.Equal(t, Curve_CURVE25519, nc.Curve())
|
|
||||||
assert.Equal(t, nc.Signature(), nc2.Signature())
|
|
||||||
assert.Equal(t, nc.Name(), nc2.Name())
|
|
||||||
assert.Equal(t, nc.NotBefore(), nc2.NotBefore())
|
|
||||||
assert.Equal(t, nc.NotAfter(), nc2.NotAfter())
|
|
||||||
assert.Equal(t, nc.PublicKey(), nc2.PublicKey())
|
|
||||||
assert.Equal(t, nc.IsCA(), nc2.IsCA())
|
|
||||||
assert.Equal(t, nc.Issuer(), nc2.Issuer())
|
|
||||||
|
|
||||||
// unmarshalling will sort networks and unsafeNetworks, we need to do the same
|
|
||||||
// but first make sure it fails
|
|
||||||
assert.NotEqual(t, nc.Networks(), nc2.Networks())
|
|
||||||
assert.NotEqual(t, nc.UnsafeNetworks(), nc2.UnsafeNetworks())
|
|
||||||
|
|
||||||
slices.SortFunc(nc.details.networks, comparePrefix)
|
|
||||||
slices.SortFunc(nc.details.unsafeNetworks, comparePrefix)
|
|
||||||
|
|
||||||
assert.Equal(t, nc.Networks(), nc2.Networks())
|
|
||||||
assert.Equal(t, nc.UnsafeNetworks(), nc2.UnsafeNetworks())
|
|
||||||
|
|
||||||
assert.Equal(t, nc.Groups(), nc2.Groups())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_Expired(t *testing.T) {
|
|
||||||
nc := certificateV2{
|
|
||||||
details: detailsV2{
|
|
||||||
notBefore: time.Now().Add(time.Second * -60).Round(time.Second),
|
|
||||||
notAfter: time.Now().Add(time.Second * 60).Round(time.Second),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.True(t, nc.Expired(time.Now().Add(time.Hour)))
|
|
||||||
assert.True(t, nc.Expired(time.Now().Add(-time.Hour)))
|
|
||||||
assert.False(t, nc.Expired(time.Now()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_MarshalJSON(t *testing.T) {
|
|
||||||
time.Local = time.UTC
|
|
||||||
pubKey := []byte("1234567890abcedf1234567890abcedf")
|
|
||||||
|
|
||||||
nc := certificateV2{
|
|
||||||
details: detailsV2{
|
|
||||||
name: "testing",
|
|
||||||
networks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("10.1.1.1/24"),
|
|
||||||
mustParsePrefixUnmapped("10.1.1.2/16"),
|
|
||||||
},
|
|
||||||
unsafeNetworks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("9.1.1.2/24"),
|
|
||||||
mustParsePrefixUnmapped("9.1.1.3/16"),
|
|
||||||
},
|
|
||||||
groups: []string{"test-group1", "test-group2", "test-group3"},
|
|
||||||
notBefore: time.Date(1, 0, 0, 1, 0, 0, 0, time.UTC),
|
|
||||||
notAfter: time.Date(1, 0, 0, 2, 0, 0, 0, time.UTC),
|
|
||||||
isCA: false,
|
|
||||||
issuer: "1234567890abcedf1234567890abcedf",
|
|
||||||
},
|
|
||||||
publicKey: pubKey,
|
|
||||||
signature: []byte("1234567890abcedf1234567890abcedf1234567890abcedf1234567890abcedf"),
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := nc.MarshalJSON()
|
|
||||||
require.ErrorIs(t, err, ErrMissingDetails)
|
|
||||||
|
|
||||||
rd, err := nc.details.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
nc.rawDetails = rd
|
|
||||||
b, err = nc.MarshalJSON()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.JSONEq(
|
|
||||||
t,
|
|
||||||
"{\"curve\":\"CURVE25519\",\"details\":{\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"isCa\":false,\"issuer\":\"1234567890abcedf1234567890abcedf\",\"name\":\"testing\",\"networks\":[\"10.1.1.1/24\",\"10.1.1.2/16\"],\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"unsafeNetworks\":[\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"152d9a7400c1e001cb76cffd035215ebb351f69eeb797f7f847dd086e15e56dd\",\"publicKey\":\"3132333435363738393061626365646631323334353637383930616263656466\",\"signature\":\"31323334353637383930616263656466313233343536373839306162636564663132333435363738393061626365646631323334353637383930616263656466\",\"version\":2}",
|
|
||||||
string(b),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_VerifyPrivateKey(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
err := ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey[:16])
|
|
||||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
|
||||||
|
|
||||||
_, caKey2, err := ed25519.GenerateKey(rand.Reader)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
|
|
||||||
require.ErrorIs(t, err, ErrPublicPrivateKeyMismatch)
|
|
||||||
|
|
||||||
c, _, priv, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
assert.Equal(t, Curve_CURVE25519, curve)
|
|
||||||
err = c.VerifyPrivateKey(Curve_CURVE25519, rawPriv)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, priv2 := X25519Keypair()
|
|
||||||
err = c.VerifyPrivateKey(Curve_P256, priv2)
|
|
||||||
require.ErrorIs(t, err, ErrPublicPrivateCurveMismatch)
|
|
||||||
|
|
||||||
err = c.VerifyPrivateKey(Curve_CURVE25519, priv2)
|
|
||||||
require.ErrorIs(t, err, ErrPublicPrivateKeyMismatch)
|
|
||||||
|
|
||||||
err = c.VerifyPrivateKey(Curve_CURVE25519, priv2[:16])
|
|
||||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
|
||||||
|
|
||||||
ac, ok := c.(*certificateV2)
|
|
||||||
require.True(t, ok)
|
|
||||||
ac.curve = Curve(99)
|
|
||||||
err = c.VerifyPrivateKey(Curve(99), priv2)
|
|
||||||
require.EqualError(t, err, "invalid curve: 99")
|
|
||||||
|
|
||||||
ca2, _, caKey2, _ := NewTestCaCert(Version2, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = ca2.VerifyPrivateKey(Curve_P256, caKey2[:16])
|
|
||||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
|
||||||
|
|
||||||
c, _, priv, _ = NewTestCert(Version2, Curve_P256, ca2, caKey2, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
rawPriv, b, curve, err = UnmarshalPrivateKeyFromPEM(priv)
|
|
||||||
|
|
||||||
err = c.VerifyPrivateKey(Curve_P256, priv[:16])
|
|
||||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
|
||||||
|
|
||||||
err = c.VerifyPrivateKey(Curve_P256, priv)
|
|
||||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
|
||||||
|
|
||||||
aCa, ok := ca2.(*certificateV2)
|
|
||||||
require.True(t, ok)
|
|
||||||
aCa.curve = Curve(99)
|
|
||||||
err = aCa.VerifyPrivateKey(Curve(99), priv2)
|
|
||||||
require.EqualError(t, err, "invalid curve: 99")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_VerifyPrivateKeyP256(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
err := ca.VerifyPrivateKey(Curve_P256, caKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, _, caKey2, _ := NewTestCaCert(Version2, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = ca.VerifyPrivateKey(Curve_P256, caKey2)
|
|
||||||
require.Error(t, err)
|
|
||||||
|
|
||||||
c, _, priv, _ := NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
|
||||||
rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, b)
|
|
||||||
assert.Equal(t, Curve_P256, curve)
|
|
||||||
err = c.VerifyPrivateKey(Curve_P256, rawPriv)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, priv2 := P256Keypair()
|
|
||||||
err = c.VerifyPrivateKey(Curve_P256, priv2)
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_Copy(t *testing.T) {
|
|
||||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
|
||||||
c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
|
|
||||||
cc := c.Copy()
|
|
||||||
test.AssertDeepCopyEqual(t, c, cc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUnmarshalCertificateV2(t *testing.T) {
|
|
||||||
data := []byte("\x98\x00\x00")
|
|
||||||
_, err := unmarshalCertificateV2(data, nil, Curve_CURVE25519)
|
|
||||||
require.EqualError(t, err, "bad wire format")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCertificateV2_marshalForSigningStability(t *testing.T) {
|
|
||||||
before := time.Date(1996, time.May, 5, 0, 0, 0, 0, time.UTC)
|
|
||||||
after := before.Add(time.Second * 60).Round(time.Second)
|
|
||||||
pubKey := []byte("1234567890abcedfghij1234567890ab")
|
|
||||||
|
|
||||||
nc := certificateV2{
|
|
||||||
details: detailsV2{
|
|
||||||
name: "testing",
|
|
||||||
networks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("10.1.1.2/16"),
|
|
||||||
mustParsePrefixUnmapped("10.1.1.1/24"),
|
|
||||||
},
|
|
||||||
unsafeNetworks: []netip.Prefix{
|
|
||||||
mustParsePrefixUnmapped("9.1.1.3/16"),
|
|
||||||
mustParsePrefixUnmapped("9.1.1.2/24"),
|
|
||||||
},
|
|
||||||
groups: []string{"test-group1", "test-group2", "test-group3"},
|
|
||||||
notBefore: before,
|
|
||||||
notAfter: after,
|
|
||||||
isCA: false,
|
|
||||||
issuer: "1234567890abcdef1234567890abcdef",
|
|
||||||
},
|
|
||||||
signature: []byte("1234567890abcdef1234567890abcdef"),
|
|
||||||
publicKey: pubKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
const expectedRawDetailsStr = "a070800774657374696e67a10e04050a0101021004050a01010118a20e0405090101031004050901010218a3270c0b746573742d67726f7570310c0b746573742d67726f7570320c0b746573742d67726f7570338504318bef808604318befbc87101234567890abcdef1234567890abcdef"
|
|
||||||
expectedRawDetails, err := hex.DecodeString(expectedRawDetailsStr)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
db, err := nc.details.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedRawDetails, db)
|
|
||||||
|
|
||||||
expectedForSigning, err := hex.DecodeString(expectedRawDetailsStr + "00313233343536373839306162636564666768696a313233343536373839306162")
|
|
||||||
b, err := nc.marshalForSigning()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedForSigning, b)
|
|
||||||
}
|
|
||||||
161
cert/crypto.go
161
cert/crypto.go
@ -3,28 +3,14 @@ package cert
|
|||||||
import (
|
import (
|
||||||
"crypto/aes"
|
"crypto/aes"
|
||||||
"crypto/cipher"
|
"crypto/cipher"
|
||||||
"crypto/ed25519"
|
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/pem"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/argon2"
|
"golang.org/x/crypto/argon2"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type NebulaEncryptedData struct {
|
// KDF factors
|
||||||
EncryptionMetadata NebulaEncryptionMetadata
|
|
||||||
Ciphertext []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type NebulaEncryptionMetadata struct {
|
|
||||||
EncryptionAlgorithm string
|
|
||||||
Argon2Parameters Argon2Parameters
|
|
||||||
}
|
|
||||||
|
|
||||||
// Argon2Parameters KDF factors
|
|
||||||
type Argon2Parameters struct {
|
type Argon2Parameters struct {
|
||||||
version rune
|
version rune
|
||||||
Memory uint32 // KiB
|
Memory uint32 // KiB
|
||||||
@ -33,7 +19,7 @@ type Argon2Parameters struct {
|
|||||||
salt []byte
|
salt []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewArgon2Parameters Returns a new Argon2Parameters object with current version set
|
// Returns a new Argon2Parameters object with current version set
|
||||||
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
|
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
|
||||||
return &Argon2Parameters{
|
return &Argon2Parameters{
|
||||||
version: argon2.Version,
|
version: argon2.Version,
|
||||||
@ -155,146 +141,3 @@ func splitNonceCiphertext(blob []byte, nonceSize int) ([]byte, []byte, error) {
|
|||||||
|
|
||||||
return blob[:nonceSize], blob[nonceSize:], nil
|
return blob[:nonceSize], blob[nonceSize:], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncryptAndMarshalSigningPrivateKey is a simple helper to encrypt and PEM encode a private key
|
|
||||||
func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte, kdfParams *Argon2Parameters) ([]byte, error) {
|
|
||||||
ciphertext, err := aes256Encrypt(passphrase, kdfParams, b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err = proto.Marshal(&RawNebulaEncryptedData{
|
|
||||||
EncryptionMetadata: &RawNebulaEncryptionMetadata{
|
|
||||||
EncryptionAlgorithm: "AES-256-GCM",
|
|
||||||
Argon2Parameters: &RawNebulaArgon2Parameters{
|
|
||||||
Version: kdfParams.version,
|
|
||||||
Memory: kdfParams.Memory,
|
|
||||||
Parallelism: uint32(kdfParams.Parallelism),
|
|
||||||
Iterations: kdfParams.Iterations,
|
|
||||||
Salt: kdfParams.salt,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Ciphertext: ciphertext,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch curve {
|
|
||||||
case Curve_CURVE25519:
|
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: EncryptedEd25519PrivateKeyBanner, Bytes: b}), nil
|
|
||||||
case Curve_P256:
|
|
||||||
return pem.EncodeToMemory(&pem.Block{Type: EncryptedECDSAP256PrivateKeyBanner, Bytes: b}), nil
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("invalid curve: %v", curve)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of a nebula cert into its
|
|
||||||
// protobuf-generated struct.
|
|
||||||
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
|
|
||||||
if len(b) == 0 {
|
|
||||||
return nil, fmt.Errorf("nil byte array")
|
|
||||||
}
|
|
||||||
var rned RawNebulaEncryptedData
|
|
||||||
err := proto.Unmarshal(b, &rned)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if rned.EncryptionMetadata == nil {
|
|
||||||
return nil, fmt.Errorf("encoded EncryptionMetadata was nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
if rned.EncryptionMetadata.Argon2Parameters == nil {
|
|
||||||
return nil, fmt.Errorf("encoded Argon2Parameters was nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
params, err := unmarshalArgon2Parameters(rned.EncryptionMetadata.Argon2Parameters)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ned := NebulaEncryptedData{
|
|
||||||
EncryptionMetadata: NebulaEncryptionMetadata{
|
|
||||||
EncryptionAlgorithm: rned.EncryptionMetadata.EncryptionAlgorithm,
|
|
||||||
Argon2Parameters: *params,
|
|
||||||
},
|
|
||||||
Ciphertext: rned.Ciphertext,
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ned, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) {
|
|
||||||
if params.Version < math.MinInt32 || params.Version > math.MaxInt32 {
|
|
||||||
return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32)
|
|
||||||
}
|
|
||||||
if params.Memory <= 0 || params.Memory > math.MaxUint32 {
|
|
||||||
return nil, fmt.Errorf("Argon2Parameters Memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
|
|
||||||
}
|
|
||||||
if params.Parallelism <= 0 || params.Parallelism > math.MaxUint8 {
|
|
||||||
return nil, fmt.Errorf("Argon2Parameters Parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
|
|
||||||
}
|
|
||||||
if params.Iterations <= 0 || params.Iterations > math.MaxUint32 {
|
|
||||||
return nil, fmt.Errorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Argon2Parameters{
|
|
||||||
version: params.Version,
|
|
||||||
Memory: params.Memory,
|
|
||||||
Parallelism: uint8(params.Parallelism),
|
|
||||||
Iterations: params.Iterations,
|
|
||||||
salt: params.Salt,
|
|
||||||
}, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecryptAndUnmarshalSigningPrivateKey will try to pem decode and decrypt an Ed25519/ECDSA private key with
|
|
||||||
// the given passphrase, returning any other bytes b or an error on failure
|
|
||||||
func DecryptAndUnmarshalSigningPrivateKey(passphrase, b []byte) (Curve, []byte, []byte, error) {
|
|
||||||
var curve Curve
|
|
||||||
|
|
||||||
k, r := pem.Decode(b)
|
|
||||||
if k == nil {
|
|
||||||
return curve, nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch k.Type {
|
|
||||||
case EncryptedEd25519PrivateKeyBanner:
|
|
||||||
curve = Curve_CURVE25519
|
|
||||||
case EncryptedECDSAP256PrivateKeyBanner:
|
|
||||||
curve = Curve_P256
|
|
||||||
default:
|
|
||||||
return curve, nil, r, fmt.Errorf("bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
|
|
||||||
}
|
|
||||||
|
|
||||||
ned, err := UnmarshalNebulaEncryptedData(k.Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return curve, nil, r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var bytes []byte
|
|
||||||
switch ned.EncryptionMetadata.EncryptionAlgorithm {
|
|
||||||
case "AES-256-GCM":
|
|
||||||
bytes, err = aes256Decrypt(passphrase, &ned.EncryptionMetadata.Argon2Parameters, ned.Ciphertext)
|
|
||||||
if err != nil {
|
|
||||||
return curve, nil, r, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return curve, nil, r, fmt.Errorf("unsupported encryption algorithm: %s", ned.EncryptionMetadata.EncryptionAlgorithm)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch curve {
|
|
||||||
case Curve_CURVE25519:
|
|
||||||
if len(bytes) != ed25519.PrivateKeySize {
|
|
||||||
return curve, nil, r, fmt.Errorf("key was not %d bytes, is invalid ed25519 private key", ed25519.PrivateKeySize)
|
|
||||||
}
|
|
||||||
case Curve_P256:
|
|
||||||
if len(bytes) != 32 {
|
|
||||||
return curve, nil, r, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return curve, bytes, r, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@ -4,110 +4,22 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"golang.org/x/crypto/argon2"
|
"golang.org/x/crypto/argon2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewArgon2Parameters(t *testing.T) {
|
func TestNewArgon2Parameters(t *testing.T) {
|
||||||
p := NewArgon2Parameters(64*1024, 4, 3)
|
p := NewArgon2Parameters(64*1024, 4, 3)
|
||||||
assert.Equal(t, &Argon2Parameters{
|
assert.EqualValues(t, &Argon2Parameters{
|
||||||
version: argon2.Version,
|
version: argon2.Version,
|
||||||
Memory: 64 * 1024,
|
Memory: 64 * 1024,
|
||||||
Parallelism: 4,
|
Parallelism: 4,
|
||||||
Iterations: 3,
|
Iterations: 3,
|
||||||
}, p)
|
}, p)
|
||||||
p = NewArgon2Parameters(2*1024*1024, 2, 1)
|
p = NewArgon2Parameters(2*1024*1024, 2, 1)
|
||||||
assert.Equal(t, &Argon2Parameters{
|
assert.EqualValues(t, &Argon2Parameters{
|
||||||
version: argon2.Version,
|
version: argon2.Version,
|
||||||
Memory: 2 * 1024 * 1024,
|
Memory: 2 * 1024 * 1024,
|
||||||
Parallelism: 2,
|
Parallelism: 2,
|
||||||
Iterations: 1,
|
Iterations: 1,
|
||||||
}, p)
|
}, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
|
|
||||||
passphrase := []byte("DO NOT USE")
|
|
||||||
privKey := []byte(`# A good key
|
|
||||||
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
|
||||||
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiCPoDfGQiosxNPTbPn5EsMlc2MI
|
|
||||||
c0Bt4oz6gTrFQhX3aBJcimhHKeAuhyTGvllD0Z19fe+DFPcLH3h5VrdjVfIAajg0
|
|
||||||
KrbV3n9UHif/Au5skWmquNJzoW1E4MTdRbvpti6o+WdQ49DxjBFhx0YH8LBqrbPU
|
|
||||||
0BGkUHmIO7daP24=
|
|
||||||
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
|
||||||
`)
|
|
||||||
shortKey := []byte(`# A key which, once decrypted, is too short
|
|
||||||
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
|
||||||
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiAVJwdfl3r+eqi/vF6S7OMdpjfo
|
|
||||||
hAzmTCRnr58Su4AqmBJbCv3zleYCEKYJP6UI3S8ekLMGISsgO4hm5leukCCyqT0Z
|
|
||||||
cQ76yrberpzkJKoPLGisX8f+xdy4aXSZl7oEYWQte1+vqbtl/eY9PGZhxUQdcyq7
|
|
||||||
hqzIyrRqfUgVuA==
|
|
||||||
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
|
||||||
`)
|
|
||||||
invalidBanner := []byte(`# Invalid banner (not encrypted)
|
|
||||||
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
|
||||||
bWRp2CTVFhW9HD/qCd28ltDgK3w8VXSeaEYczDWos8sMUBqDb9jP3+NYwcS4lURG
|
|
||||||
XgLvodMXZJuaFPssp+WwtA==
|
|
||||||
-----END NEBULA ED25519 PRIVATE KEY-----
|
|
||||||
`)
|
|
||||||
invalidPem := []byte(`# Not a valid PEM format
|
|
||||||
-BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
|
||||||
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
|
|
||||||
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
|
|
||||||
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
|
|
||||||
qrlJ69wer3ZUHFXA
|
|
||||||
-END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
|
|
||||||
`)
|
|
||||||
|
|
||||||
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
|
|
||||||
|
|
||||||
// Success test case
|
|
||||||
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, keyBundle)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, Curve_CURVE25519, curve)
|
|
||||||
assert.Len(t, k, 64)
|
|
||||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
|
||||||
|
|
||||||
// Fail due to short key
|
|
||||||
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
|
|
||||||
require.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
|
|
||||||
assert.Nil(t, k)
|
|
||||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
|
||||||
|
|
||||||
// Fail due to invalid banner
|
|
||||||
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
|
|
||||||
require.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
|
|
||||||
assert.Nil(t, k)
|
|
||||||
assert.Equal(t, rest, invalidPem)
|
|
||||||
|
|
||||||
// Fail due to invalid PEM format, because
|
|
||||||
// it's missing the requisite pre-encapsulation boundary.
|
|
||||||
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
|
|
||||||
require.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
|
||||||
assert.Nil(t, k)
|
|
||||||
assert.Equal(t, rest, invalidPem)
|
|
||||||
|
|
||||||
// Fail due to invalid passphrase
|
|
||||||
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey)
|
|
||||||
require.EqualError(t, err, "invalid passphrase or corrupt private key")
|
|
||||||
assert.Nil(t, k)
|
|
||||||
assert.Equal(t, []byte{}, rest)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) {
|
|
||||||
// Having proved that decryption works correctly above, we can test the
|
|
||||||
// encryption function produces a value which can be decrypted
|
|
||||||
passphrase := []byte("passphrase")
|
|
||||||
bytes := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
|
|
||||||
kdfParams := NewArgon2Parameters(64*1024, 4, 3)
|
|
||||||
key, err := EncryptAndMarshalSigningPrivateKey(Curve_CURVE25519, bytes, passphrase, kdfParams)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify the "key" can be decrypted successfully
|
|
||||||
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, key)
|
|
||||||
assert.Len(t, k, 64)
|
|
||||||
assert.Equal(t, Curve_CURVE25519, curve)
|
|
||||||
assert.Equal(t, []byte{}, rest)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// EncryptAndMarshalEd25519PrivateKey does not create any errors itself
|
|
||||||
}
|
|
||||||
|
|||||||
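A minimal, self-contained sketch of the round trip these two tests exercise, using only the cert package functions called above; the key material and passphrase are illustrative, not taken from the diff:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/slackhq/nebula/cert"
	"golang.org/x/crypto/ed25519"
)

func main() {
	_, priv, _ := ed25519.GenerateKey(rand.Reader) // 64-byte signing key
	passphrase := []byte("passphrase")

	// Argon2 parameters as used in the test: memory (KiB), parallelism, iterations.
	kdf := cert.NewArgon2Parameters(64*1024, 4, 3)
	pemKey, err := cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, priv, passphrase, kdf)
	if err != nil {
		panic(err)
	}

	// Decrypting with the same passphrase should return the same 64-byte key.
	curve, key, rest, err := cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, pemKey)
	fmt.Println(curve, len(key), len(rest), err) // expect: CURVE25519 64 0 <nil>
}
```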
@@ -2,48 +2,14 @@ package cert

 import (
 	"errors"
-	"fmt"
 )

 var (
-	ErrBadFormat = errors.New("bad wire format")
-	ErrRootExpired = errors.New("root certificate is expired")
-	ErrExpired = errors.New("certificate is expired")
-	ErrNotCA = errors.New("certificate is not a CA")
-	ErrNotSelfSigned = errors.New("certificate is not self-signed")
-	ErrBlockListed = errors.New("certificate is in the block list")
-	ErrFingerprintMismatch = errors.New("certificate fingerprint did not match")
-	ErrSignatureMismatch = errors.New("certificate signature did not match")
-	ErrInvalidPublicKey = errors.New("invalid public key")
-	ErrInvalidPrivateKey = errors.New("invalid private key")
-	ErrPublicPrivateCurveMismatch = errors.New("public key does not match private key curve")
-	ErrPublicPrivateKeyMismatch = errors.New("public key and private key are not a pair")
-	ErrPrivateKeyEncrypted = errors.New("private key must be decrypted")
-	ErrCaNotFound = errors.New("could not find ca for the certificate")
-
-	ErrInvalidPEMBlock = errors.New("input did not contain a valid PEM encoded block")
-	ErrInvalidPEMCertificateBanner = errors.New("bytes did not contain a proper certificate banner")
-	ErrInvalidPEMX25519PublicKeyBanner = errors.New("bytes did not contain a proper X25519 public key banner")
-	ErrInvalidPEMX25519PrivateKeyBanner = errors.New("bytes did not contain a proper X25519 private key banner")
-	ErrInvalidPEMEd25519PublicKeyBanner = errors.New("bytes did not contain a proper Ed25519 public key banner")
-	ErrInvalidPEMEd25519PrivateKeyBanner = errors.New("bytes did not contain a proper Ed25519 private key banner")
-
-	ErrNoPeerStaticKey = errors.New("no peer static key was present")
-	ErrNoPayload = errors.New("provided payload was empty")
-
-	ErrMissingDetails = errors.New("certificate did not contain details")
-	ErrEmptySignature = errors.New("empty signature")
-	ErrEmptyRawDetails = errors.New("empty rawDetails not allowed")
+	ErrRootExpired = errors.New("root certificate is expired")
+	ErrExpired = errors.New("certificate is expired")
+	ErrNotCA = errors.New("certificate is not a CA")
+	ErrNotSelfSigned = errors.New("certificate is not self-signed")
+	ErrBlockListed = errors.New("certificate is in the block list")
+	ErrSignatureMismatch = errors.New("certificate signature did not match")
+	ErrInvalidPEMCertificateUnsupported = errors.New("bytes contain an unsupported certificate format")
 )
-
-type ErrInvalidCertificateProperties struct {
-	str string
-}
-
-func NewErrInvalidCertificateProperties(format string, a ...any) error {
-	return &ErrInvalidCertificateProperties{fmt.Sprintf(format, a...)}
-}
-
-func (e *ErrInvalidCertificateProperties) Error() string {
-	return e.str
-}
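Since the richer error set on the "-" side of this hunk is built from plain `errors.New` values, callers can branch on them with `errors.Is`. A small hypothetical caller-side sketch; the variable names are illustrative, not part of the diff:

```go
// Hypothetical: distinguish "not PEM at all" from "PEM but not a certificate".
c, _, err := cert.UnmarshalCertificateFromPEM(pemBytes)
switch {
case errors.Is(err, cert.ErrInvalidPEMBlock):
	// input wasn't a PEM block at all
case errors.Is(err, cert.ErrInvalidPEMCertificateBanner):
	// PEM block with a non-certificate banner
case err != nil:
	// malformed certificate payload
default:
	_ = c // valid certificate
}
```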
@@ -1,141 +0,0 @@
package cert

import (
	"crypto/ecdh"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"io"
	"net/netip"
	"time"

	"golang.org/x/crypto/curve25519"
	"golang.org/x/crypto/ed25519"
)

// NewTestCaCert will create a new ca certificate
func NewTestCaCert(version Version, curve Curve, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (Certificate, []byte, []byte, []byte) {
	var err error
	var pub, priv []byte

	switch curve {
	case Curve_CURVE25519:
		pub, priv, err = ed25519.GenerateKey(rand.Reader)
	case Curve_P256:
		privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			panic(err)
		}

		pub = elliptic.Marshal(elliptic.P256(), privk.PublicKey.X, privk.PublicKey.Y)
		priv = privk.D.FillBytes(make([]byte, 32))
	default:
		// There is no default to allow the underlying lib to respond with an error
	}

	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}
	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	t := &TBSCertificate{
		Curve: curve,
		Version: version,
		Name: "test ca",
		NotBefore: time.Unix(before.Unix(), 0),
		NotAfter: time.Unix(after.Unix(), 0),
		PublicKey: pub,
		Networks: networks,
		UnsafeNetworks: unsafeNetworks,
		Groups: groups,
		IsCA: true,
	}

	c, err := t.Sign(nil, curve, priv)
	if err != nil {
		panic(err)
	}

	pem, err := c.MarshalPEM()
	if err != nil {
		panic(err)
	}

	return c, pub, priv, pem
}

// NewTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func NewTestCert(v Version, curve Curve, ca Certificate, key []byte, name string, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (Certificate, []byte, []byte, []byte) {
	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}

	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	if len(networks) == 0 {
		networks = []netip.Prefix{netip.MustParsePrefix("10.0.0.123/8")}
	}

	var pub, priv []byte
	switch curve {
	case Curve_CURVE25519:
		pub, priv = X25519Keypair()
	case Curve_P256:
		pub, priv = P256Keypair()
	default:
		panic("unknown curve")
	}

	nc := &TBSCertificate{
		Version: v,
		Curve: curve,
		Name: name,
		Networks: networks,
		UnsafeNetworks: unsafeNetworks,
		Groups: groups,
		NotBefore: time.Unix(before.Unix(), 0),
		NotAfter: time.Unix(after.Unix(), 0),
		PublicKey: pub,
		IsCA: false,
	}

	c, err := nc.Sign(ca, ca.Curve(), key)
	if err != nil {
		panic(err)
	}

	pem, err := c.MarshalPEM()
	if err != nil {
		panic(err)
	}

	return c, pub, MarshalPrivateKeyToPEM(curve, priv), pem
}

func X25519Keypair() ([]byte, []byte) {
	privkey := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
		panic(err)
	}

	pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	return pubkey, privkey
}

func P256Keypair() ([]byte, []byte) {
	privkey, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	pubkey := privkey.PublicKey()
	return pubkey.Bytes(), privkey.Bytes()
}
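These helpers are meant to be chained in tests: the CA private key returned by NewTestCaCert is what NewTestCert needs to sign a leaf certificate. A short sketch using only the signatures above; the values and names are illustrative:

```go
// Build a CA, then a host certificate signed by it; zero times and nil slices
// fall back to the helpers' built-in defaults.
ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
crt, hostPub, hostKeyPEM, crtPEM := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "host1", time.Time{}, time.Time{}, nil, nil, []string{"group1"})
_, _, _, _ = crt, hostPub, hostKeyPEM, crtPEM
```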
161 cert/pem.go
@@ -1,161 +0,0 @@
package cert

import (
	"encoding/pem"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

const (
	CertificateBanner = "NEBULA CERTIFICATE"
	CertificateV2Banner = "NEBULA CERTIFICATE V2"
	X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
	X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
	EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
	Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
	Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"

	P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
	P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
	EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
	ECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 PRIVATE KEY"
)

// UnmarshalCertificateFromPEM will try to unmarshal the first pem block in a byte array, returning any non consumed
// data or an error on failure
func UnmarshalCertificateFromPEM(b []byte) (Certificate, []byte, error) {
	p, r := pem.Decode(b)
	if p == nil {
		return nil, r, ErrInvalidPEMBlock
	}

	var c Certificate
	var err error

	switch p.Type {
	// Implementations must validate the resulting certificate contains valid information
	case CertificateBanner:
		c, err = unmarshalCertificateV1(p.Bytes, nil)
	case CertificateV2Banner:
		c, err = unmarshalCertificateV2(p.Bytes, nil, Curve_CURVE25519)
	default:
		return nil, r, ErrInvalidPEMCertificateBanner
	}

	if err != nil {
		return nil, r, err
	}

	return c, r, nil

}

func MarshalPublicKeyToPEM(curve Curve, b []byte) []byte {
	switch curve {
	case Curve_CURVE25519:
		return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
	case Curve_P256:
		return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
	default:
		return nil
	}
}

func UnmarshalPublicKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	var expectedLen int
	var curve Curve
	switch k.Type {
	case X25519PublicKeyBanner, Ed25519PublicKeyBanner:
		expectedLen = 32
		curve = Curve_CURVE25519
	case P256PublicKeyBanner:
		// Uncompressed
		expectedLen = 65
		curve = Curve_P256
	default:
		return nil, r, 0, fmt.Errorf("bytes did not contain a proper public key banner")
	}
	if len(k.Bytes) != expectedLen {
		return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s public key", expectedLen, curve)
	}
	return k.Bytes, r, curve, nil
}

func MarshalPrivateKeyToPEM(curve Curve, b []byte) []byte {
	switch curve {
	case Curve_CURVE25519:
		return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
	case Curve_P256:
		return pem.EncodeToMemory(&pem.Block{Type: P256PrivateKeyBanner, Bytes: b})
	default:
		return nil
	}
}

func MarshalSigningPrivateKeyToPEM(curve Curve, b []byte) []byte {
	switch curve {
	case Curve_CURVE25519:
		return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: b})
	case Curve_P256:
		return pem.EncodeToMemory(&pem.Block{Type: ECDSAP256PrivateKeyBanner, Bytes: b})
	default:
		return nil
	}
}

// UnmarshalPrivateKeyFromPEM will try to unmarshal the first pem block in a byte array, returning any non
// consumed data or an error on failure
func UnmarshalPrivateKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	var expectedLen int
	var curve Curve
	switch k.Type {
	case X25519PrivateKeyBanner:
		expectedLen = 32
		curve = Curve_CURVE25519
	case P256PrivateKeyBanner:
		expectedLen = 32
		curve = Curve_P256
	default:
		return nil, r, 0, fmt.Errorf("bytes did not contain a proper private key banner")
	}
	if len(k.Bytes) != expectedLen {
		return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s private key", expectedLen, curve)
	}
	return k.Bytes, r, curve, nil
}

func UnmarshalSigningPrivateKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	var curve Curve
	switch k.Type {
	case EncryptedEd25519PrivateKeyBanner:
		return nil, nil, Curve_CURVE25519, ErrPrivateKeyEncrypted
	case EncryptedECDSAP256PrivateKeyBanner:
		return nil, nil, Curve_P256, ErrPrivateKeyEncrypted
	case Ed25519PrivateKeyBanner:
		curve = Curve_CURVE25519
		if len(k.Bytes) != ed25519.PrivateKeySize {
			return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid Ed25519 private key", ed25519.PrivateKeySize)
		}
	case ECDSAP256PrivateKeyBanner:
		curve = Curve_P256
		if len(k.Bytes) != 32 {
			return nil, r, 0, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
		}
	default:
		return nil, r, 0, fmt.Errorf("bytes did not contain a proper Ed25519/ECDSA private key banner")
	}
	return k.Bytes, r, curve, nil
}
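From a caller's point of view, the `rest` return value makes it easy to walk a bundle that concatenates several PEM blocks. A minimal sketch, assuming the certificates are simply collected and error handling is kept deliberately blunt:

```go
// Collect every certificate in a concatenated PEM bundle.
func collectCerts(bundle []byte) ([]cert.Certificate, error) {
	var certs []cert.Certificate
	rest := bundle
	for len(rest) > 0 {
		c, r, err := cert.UnmarshalCertificateFromPEM(rest)
		if err != nil {
			// Stop on the first block that is not a valid certificate.
			return certs, err
		}
		certs = append(certs, c)
		rest = r
	}
	return certs, nil
}
```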
293 cert/pem_test.go
@@ -1,293 +0,0 @@
package cert

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestUnmarshalCertificateFromPEM(t *testing.T) {
	goodCert := []byte(`
# A good cert
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-----END NEBULA CERTIFICATE-----
`)
	badBanner := []byte(`# A bad banner
-----BEGIN NOT A NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-----END NOT A NEBULA CERTIFICATE-----
`)
	invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-END NEBULA CERTIFICATE----`)

	certBundle := appendByteSlices(goodCert, badBanner, invalidPem)

	// Success test case
	cert, rest, err := UnmarshalCertificateFromPEM(certBundle)
	assert.NotNil(t, cert)
	assert.Equal(t, rest, append(badBanner, invalidPem...))
	require.NoError(t, err)

	// Fail due to invalid banner.
	cert, rest, err = UnmarshalCertificateFromPEM(rest)
	assert.Nil(t, cert)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "bytes did not contain a proper certificate banner")

	// Fail due to invalid PEM format, because
	// it's missing the requisite pre-encapsulation boundary.
	cert, rest, err = UnmarshalCertificateFromPEM(rest)
	assert.Nil(t, cert)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "input did not contain a valid PEM encoded block")
}

func TestUnmarshalSigningPrivateKeyFromPEM(t *testing.T) {
	privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA ED25519 PRIVATE KEY-----
`)
	privP256Key := []byte(`# A good key
-----BEGIN NEBULA ECDSA P256 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ECDSA P256 PRIVATE KEY-----
`)
	shortKey := []byte(`# A short key
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-----END NEBULA ED25519 PRIVATE KEY-----
`)
	invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NOT A NEBULA PRIVATE KEY-----
`)
	invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-END NEBULA ED25519 PRIVATE KEY-----`)

	keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)

	// Success test case
	k, rest, curve, err := UnmarshalSigningPrivateKeyFromPEM(keyBundle)
	assert.Len(t, k, 64)
	assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
	assert.Equal(t, Curve_CURVE25519, curve)
	require.NoError(t, err)

	// Success test case
	k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
	assert.Len(t, k, 32)
	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
	assert.Equal(t, Curve_P256, curve)
	require.NoError(t, err)

	// Fail due to short key
	k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
	require.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key")

	// Fail due to invalid banner
	k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "bytes did not contain a proper Ed25519/ECDSA private key banner")

	// Fail due to invalid PEM format, because
	// it's missing the requisite pre-encapsulation boundary.
	k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "input did not contain a valid PEM encoded block")
}

func TestUnmarshalPrivateKeyFromPEM(t *testing.T) {
	privKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA X25519 PRIVATE KEY-----
`)
	privP256Key := []byte(`# A good key
-----BEGIN NEBULA P256 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PRIVATE KEY-----
`)
	shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA X25519 PRIVATE KEY-----
`)
	invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PRIVATE KEY-----
`)
	invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PRIVATE KEY-----`)

	keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)

	// Success test case
	k, rest, curve, err := UnmarshalPrivateKeyFromPEM(keyBundle)
	assert.Len(t, k, 32)
	assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
	assert.Equal(t, Curve_CURVE25519, curve)
	require.NoError(t, err)

	// Success test case
	k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
	assert.Len(t, k, 32)
	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
	assert.Equal(t, Curve_P256, curve)
	require.NoError(t, err)

	// Fail due to short key
	k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
	require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key")

	// Fail due to invalid banner
	k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "bytes did not contain a proper private key banner")

	// Fail due to invalid PEM format, because
	// it's missing the requisite pre-encapsulation boundary.
	k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "input did not contain a valid PEM encoded block")
}

func TestUnmarshalPublicKeyFromPEM(t *testing.T) {
	pubKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ED25519 PUBLIC KEY-----
`)
	shortKey := []byte(`# A short key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA ED25519 PUBLIC KEY-----
`)
	invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PUBLIC KEY-----
`)
	invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA ED25519 PUBLIC KEY-----`)

	keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)

	// Success test case
	k, rest, curve, err := UnmarshalPublicKeyFromPEM(keyBundle)
	assert.Len(t, k, 32)
	assert.Equal(t, Curve_CURVE25519, curve)
	require.NoError(t, err)
	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))

	// Fail due to short key
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, Curve_CURVE25519, curve)
	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
	require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")

	// Fail due to invalid banner
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, Curve_CURVE25519, curve)
	require.EqualError(t, err, "bytes did not contain a proper public key banner")
	assert.Equal(t, rest, invalidPem)

	// Fail due to invalid PEM format, because
	// it's missing the requisite pre-encapsulation boundary.
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, Curve_CURVE25519, curve)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "input did not contain a valid PEM encoded block")
}

func TestUnmarshalX25519PublicKey(t *testing.T) {
	pubKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA X25519 PUBLIC KEY-----
`)
	pubP256Key := []byte(`# A good key
-----BEGIN NEBULA P256 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PUBLIC KEY-----
`)
	shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA X25519 PUBLIC KEY-----
`)
	invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PUBLIC KEY-----
`)
	invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PUBLIC KEY-----`)

	keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)

	// Success test case
	k, rest, curve, err := UnmarshalPublicKeyFromPEM(keyBundle)
	assert.Len(t, k, 32)
	require.NoError(t, err)
	assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
	assert.Equal(t, Curve_CURVE25519, curve)

	// Success test case
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Len(t, k, 65)
	require.NoError(t, err)
	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
	assert.Equal(t, Curve_P256, curve)

	// Fail due to short key
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
	require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")

	// Fail due to invalid banner
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Nil(t, k)
	require.EqualError(t, err, "bytes did not contain a proper public key banner")
	assert.Equal(t, rest, invalidPem)

	// Fail due to invalid PEM format, because
	// it's missing the requisite pre-encapsulation boundary.
	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)
	require.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
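All of these tests lean on an appendByteSlices helper that lives elsewhere in the package and is not part of this diff; a plausible minimal version (an assumption, not the verbatim original) is just variadic concatenation:

```go
// Assumed shape of the helper used above: concatenate any number of byte slices.
func appendByteSlices(b ...[]byte) []byte {
	retSlice := []byte{}
	for _, v := range b {
		retSlice = append(retSlice, v...)
	}
	return retSlice
}
```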
167 cert/sign.go
@@ -1,167 +0,0 @@
package cert

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
	"net/netip"
	"time"
)

// TBSCertificate represents a certificate intended to be signed.
// It is invalid to use this structure as a Certificate.
type TBSCertificate struct {
	Version Version
	Name string
	Networks []netip.Prefix
	UnsafeNetworks []netip.Prefix
	Groups []string
	IsCA bool
	NotBefore time.Time
	NotAfter time.Time
	PublicKey []byte
	Curve Curve
	issuer string
}

type beingSignedCertificate interface {
	// fromTBSCertificate copies the values from the TBSCertificate to this version's internal representation
	// Implementations must validate the resulting certificate contains valid information
	fromTBSCertificate(*TBSCertificate) error

	// marshalForSigning returns the bytes that should be signed
	marshalForSigning() ([]byte, error)

	// setSignature sets the signature for the certificate that has just been signed. The signature must not be blank.
	setSignature([]byte) error
}

type SignerLambda func(certBytes []byte) ([]byte, error)

// Sign will create a sealed certificate using details provided by the TBSCertificate as long as those
// details do not violate constraints of the signing certificate.
// If the TBSCertificate is a CA then signer must be nil.
func (t *TBSCertificate) Sign(signer Certificate, curve Curve, key []byte) (Certificate, error) {
	switch t.Curve {
	case Curve_CURVE25519:
		pk := ed25519.PrivateKey(key)
		sp := func(certBytes []byte) ([]byte, error) {
			sig := ed25519.Sign(pk, certBytes)
			return sig, nil
		}
		return t.SignWith(signer, curve, sp)
	case Curve_P256:
		pk := &ecdsa.PrivateKey{
			PublicKey: ecdsa.PublicKey{
				Curve: elliptic.P256(),
			},
			// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
			D: new(big.Int).SetBytes(key),
		}
		// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
		pk.X, pk.Y = pk.Curve.ScalarBaseMult(key)
		sp := func(certBytes []byte) ([]byte, error) {
			// We need to hash first for ECDSA
			// - https://pkg.go.dev/crypto/ecdsa#SignASN1
			hashed := sha256.Sum256(certBytes)
			return ecdsa.SignASN1(rand.Reader, pk, hashed[:])
		}
		return t.SignWith(signer, curve, sp)
	default:
		return nil, fmt.Errorf("invalid curve: %s", t.Curve)
	}
}

// SignWith does the same thing as Sign, but uses the function in `sp` to calculate the signature.
// You should only use SignWith if you do not have direct access to your private key.
func (t *TBSCertificate) SignWith(signer Certificate, curve Curve, sp SignerLambda) (Certificate, error) {
	if curve != t.Curve {
		return nil, fmt.Errorf("curve in cert and private key supplied don't match")
	}

	if signer != nil {
		if t.IsCA {
			return nil, fmt.Errorf("can not sign a CA certificate with another")
		}

		err := checkCAConstraints(signer, t.NotBefore, t.NotAfter, t.Groups, t.Networks, t.UnsafeNetworks)
		if err != nil {
			return nil, err
		}

		issuer, err := signer.Fingerprint()
		if err != nil {
			return nil, fmt.Errorf("error computing issuer: %v", err)
		}
		t.issuer = issuer
	} else {
		if !t.IsCA {
			return nil, fmt.Errorf("self signed certificates must have IsCA set to true")
		}
	}

	var c beingSignedCertificate
	switch t.Version {
	case Version1:
		c = &certificateV1{}
		err := c.fromTBSCertificate(t)
		if err != nil {
			return nil, err
		}
	case Version2:
		c = &certificateV2{}
		err := c.fromTBSCertificate(t)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown cert version %d", t.Version)
	}

	certBytes, err := c.marshalForSigning()
	if err != nil {
		return nil, err
	}

	sig, err := sp(certBytes)
	if err != nil {
		return nil, err
	}

	err = c.setSignature(sig)
	if err != nil {
		return nil, err
	}

	sc, ok := c.(Certificate)
	if !ok {
		return nil, fmt.Errorf("invalid certificate")
	}

	return sc, nil
}

func comparePrefix(a, b netip.Prefix) int {
	addr := a.Addr().Compare(b.Addr())
	if addr == 0 {
		return a.Bits() - b.Bits()
	}
	return addr
}

// findDuplicatePrefix returns an error if there is a duplicate prefix in the pre-sorted input slice sortedPrefixes
func findDuplicatePrefix(sortedPrefixes []netip.Prefix) error {
	if len(sortedPrefixes) < 2 {
		return nil
	}
	for i := 1; i < len(sortedPrefixes); i++ {
		if comparePrefix(sortedPrefixes[i], sortedPrefixes[i-1]) == 0 {
			return NewErrInvalidCertificateProperties("duplicate network detected: %v", sortedPrefixes[i])
		}
	}
	return nil
}
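For callers that cannot hand the raw private key to Sign (the PKCS#11 path in the CLI diff further below does exactly this), SignWith accepts a SignerLambda. A minimal sketch from an external caller's perspective, assuming a cert.TBSCertificate named tbs with IsCA set to true and an ed25519.PrivateKey named caKey held by the caller; this mirrors the Curve_CURVE25519 branch of Sign above:

```go
// Hypothetical external-signer use of SignWith; tbs and caKey are assumed to
// exist in the caller. A nil signer means the result is self-signed.
sp := func(certBytes []byte) ([]byte, error) {
	// Delegate to whatever actually holds the key; plain ed25519 here for illustration.
	return ed25519.Sign(caKey, certBytes), nil
}
c, err := tbs.SignWith(nil, cert.Curve_CURVE25519, sp)
if err != nil {
	return err
}
pemBytes, err := c.MarshalPEM()
```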
@@ -1,91 +0,0 @@
package cert

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"net/netip"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCertificateV1_Sign(t *testing.T) {
	before := time.Now().Add(time.Second * -60).Round(time.Second)
	after := time.Now().Add(time.Second * 60).Round(time.Second)
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	tbs := TBSCertificate{
		Version: Version1,
		Name: "testing",
		Networks: []netip.Prefix{
			mustParsePrefixUnmapped("10.1.1.1/24"),
			mustParsePrefixUnmapped("10.1.1.2/16"),
		},
		UnsafeNetworks: []netip.Prefix{
			mustParsePrefixUnmapped("9.1.1.2/24"),
			mustParsePrefixUnmapped("9.1.1.3/24"),
		},
		Groups: []string{"test-group1", "test-group2", "test-group3"},
		NotBefore: before,
		NotAfter: after,
		PublicKey: pubKey,
		IsCA: false,
	}

	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	c, err := tbs.Sign(&certificateV1{details: detailsV1{notBefore: before, notAfter: after}}, Curve_CURVE25519, priv)
	require.NoError(t, err)
	assert.NotNil(t, c)
	assert.True(t, c.CheckSignature(pub))

	b, err := c.Marshal()
	require.NoError(t, err)
	uc, err := unmarshalCertificateV1(b, nil)
	require.NoError(t, err)
	assert.NotNil(t, uc)
}

func TestCertificateV1_SignP256(t *testing.T) {
	before := time.Now().Add(time.Second * -60).Round(time.Second)
	after := time.Now().Add(time.Second * 60).Round(time.Second)
	pubKey := []byte("01234567890abcedfghij1234567890ab1234567890abcedfghij1234567890ab")

	tbs := TBSCertificate{
		Version: Version1,
		Name: "testing",
		Networks: []netip.Prefix{
			mustParsePrefixUnmapped("10.1.1.1/24"),
			mustParsePrefixUnmapped("10.1.1.2/16"),
		},
		UnsafeNetworks: []netip.Prefix{
			mustParsePrefixUnmapped("9.1.1.2/24"),
			mustParsePrefixUnmapped("9.1.1.3/16"),
		},
		Groups: []string{"test-group1", "test-group2", "test-group3"},
		NotBefore: before,
		NotAfter: after,
		PublicKey: pubKey,
		IsCA: false,
		Curve: Curve_P256,
	}

	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	require.NoError(t, err)
	pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
	rawPriv := priv.D.FillBytes(make([]byte, 32))

	c, err := tbs.Sign(&certificateV1{details: detailsV1{notBefore: before, notAfter: after}}, Curve_P256, rawPriv)
	require.NoError(t, err)
	assert.NotNil(t, c)
	assert.True(t, c.CheckSignature(pub))

	b, err := c.Marshal()
	require.NoError(t, err)
	uc, err := unmarshalCertificateV1(b, nil)
	require.NoError(t, err)
	assert.NotNil(t, uc)
}
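mustParsePrefixUnmapped is referenced by these tests but defined elsewhere in the package; a plausible sketch (assumed, not taken from the diff) parses the prefix and strips any IPv4-in-IPv6 mapping so comparisons stay in 4-byte form:

```go
// Assumed helper: parse a prefix and unmap a 4-in-6 address.
func mustParsePrefixUnmapped(s string) netip.Prefix {
	p := netip.MustParsePrefix(s)
	return netip.PrefixFrom(p.Addr().Unmap(), p.Bits())
}
```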
@@ -1,138 +0,0 @@
package cert_test

import (
	"crypto/ecdh"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"io"
	"net/netip"
	"time"

	"github.com/slackhq/nebula/cert"
	"golang.org/x/crypto/curve25519"
	"golang.org/x/crypto/ed25519"
)

// NewTestCaCert will create a new ca certificate
func NewTestCaCert(version cert.Version, curve cert.Curve, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte, []byte, []byte) {
	var err error
	var pub, priv []byte

	switch curve {
	case cert.Curve_CURVE25519:
		pub, priv, err = ed25519.GenerateKey(rand.Reader)
	case cert.Curve_P256:
		privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			panic(err)
		}

		pub = elliptic.Marshal(elliptic.P256(), privk.PublicKey.X, privk.PublicKey.Y)
		priv = privk.D.FillBytes(make([]byte, 32))
	default:
		// There is no default to allow the underlying lib to respond with an error
	}

	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}
	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	t := &cert.TBSCertificate{
		Curve: curve,
		Version: version,
		Name: "test ca",
		NotBefore: time.Unix(before.Unix(), 0),
		NotAfter: time.Unix(after.Unix(), 0),
		PublicKey: pub,
		Networks: networks,
		UnsafeNetworks: unsafeNetworks,
		Groups: groups,
		IsCA: true,
	}

	c, err := t.Sign(nil, curve, priv)
	if err != nil {
		panic(err)
	}

	pem, err := c.MarshalPEM()
	if err != nil {
		panic(err)
	}

	return c, pub, priv, pem
}

// NewTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func NewTestCert(v cert.Version, curve cert.Curve, ca cert.Certificate, key []byte, name string, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte, []byte, []byte) {
	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}

	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	var pub, priv []byte
	switch curve {
	case cert.Curve_CURVE25519:
		pub, priv = X25519Keypair()
	case cert.Curve_P256:
		pub, priv = P256Keypair()
	default:
		panic("unknown curve")
	}

	nc := &cert.TBSCertificate{
		Version: v,
		Curve: curve,
		Name: name,
		Networks: networks,
		UnsafeNetworks: unsafeNetworks,
		Groups: groups,
		NotBefore: time.Unix(before.Unix(), 0),
		NotAfter: time.Unix(after.Unix(), 0),
		PublicKey: pub,
		IsCA: false,
	}

	c, err := nc.Sign(ca, ca.Curve(), key)
	if err != nil {
		panic(err)
	}

	pem, err := c.MarshalPEM()
	if err != nil {
		panic(err)
	}

	return c, pub, cert.MarshalPrivateKeyToPEM(curve, priv), pem
}

func X25519Keypair() ([]byte, []byte) {
	privkey := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
		panic(err)
	}

	pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	return pubkey, privkey
}

func P256Keypair() ([]byte, []byte) {
	privkey, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	pubkey := privkey.PublicKey()
	return pubkey.Bytes(), privkey.Bytes()
}
@ -8,14 +8,13 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"net/netip"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/skip2/go-qrcode"
|
"github.com/skip2/go-qrcode"
|
||||||
"github.com/slackhq/nebula/cert"
|
"github.com/slackhq/nebula/cert"
|
||||||
"github.com/slackhq/nebula/pkclient"
|
|
||||||
"golang.org/x/crypto/ed25519"
|
"golang.org/x/crypto/ed25519"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -27,43 +26,32 @@ type caFlags struct {
|
|||||||
outCertPath *string
|
outCertPath *string
|
||||||
outQRPath *string
|
outQRPath *string
|
||||||
groups *string
|
groups *string
|
||||||
networks *string
|
ips *string
|
||||||
unsafeNetworks *string
|
subnets *string
|
||||||
argonMemory *uint
|
argonMemory *uint
|
||||||
argonIterations *uint
|
argonIterations *uint
|
||||||
argonParallelism *uint
|
argonParallelism *uint
|
||||||
encryption *bool
|
encryption *bool
|
||||||
version *uint
|
|
||||||
|
|
||||||
curve *string
|
curve *string
|
||||||
p11url *string
|
|
||||||
|
|
||||||
// Deprecated options
|
|
||||||
ips *string
|
|
||||||
subnets *string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCaFlags() *caFlags {
|
func newCaFlags() *caFlags {
|
||||||
cf := caFlags{set: flag.NewFlagSet("ca", flag.ContinueOnError)}
|
cf := caFlags{set: flag.NewFlagSet("ca", flag.ContinueOnError)}
|
||||||
cf.set.Usage = func() {}
|
cf.set.Usage = func() {}
|
||||||
cf.name = cf.set.String("name", "", "Required: name of the certificate authority")
|
cf.name = cf.set.String("name", "", "Required: name of the certificate authority")
|
||||||
cf.version = cf.set.Uint("version", uint(cert.Version2), "Optional: version of the certificate format to use")
|
|
||||||
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
|
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
|
||||||
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
|
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
|
||||||
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
|
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
|
||||||
cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
||||||
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
||||||
cf.networks = cf.set.String("networks", "", "Optional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in networks")
|
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses")
|
||||||
cf.unsafeNetworks = cf.set.String("unsafe-networks", "", "Optional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in unsafe networks")
|
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets")
|
||||||
cf.argonMemory = cf.set.Uint("argon-memory", 2*1024*1024, "Optional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase")
|
cf.argonMemory = cf.set.Uint("argon-memory", 2*1024*1024, "Optional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase")
|
||||||
cf.argonParallelism = cf.set.Uint("argon-parallelism", 4, "Optional: Argon2 parallelism parameter used for encrypted private key passphrase")
|
cf.argonParallelism = cf.set.Uint("argon-parallelism", 4, "Optional: Argon2 parallelism parameter used for encrypted private key passphrase")
|
||||||
cf.argonIterations = cf.set.Uint("argon-iterations", 1, "Optional: Argon2 iterations parameter used for encrypted private key passphrase")
|
cf.argonIterations = cf.set.Uint("argon-iterations", 1, "Optional: Argon2 iterations parameter used for encrypted private key passphrase")
|
||||||
cf.encryption = cf.set.Bool("encrypt", false, "Optional: prompt for passphrase and write out-key in an encrypted format")
|
cf.encryption = cf.set.Bool("encrypt", false, "Optional: prompt for passphrase and write out-key in an encrypted format")
|
||||||
cf.curve = cf.set.String("curve", "25519", "EdDSA/ECDSA Curve (25519, P256)")
|
cf.curve = cf.set.String("curve", "25519", "EdDSA/ECDSA Curve (25519, P256)")
|
||||||
cf.p11url = p11Flag(cf.set)
|
|
||||||
|
|
||||||
cf.ips = cf.set.String("ips", "", "Deprecated, see -networks")
|
|
||||||
cf.subnets = cf.set.String("subnets", "", "Deprecated, see -unsafe-networks")
|
|
||||||
return &cf
|
return &cf
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -88,21 +76,17 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
isP11 := len(*cf.p11url) > 0
|
|
||||||
|
|
||||||
if err := mustFlagString("name", cf.name); err != nil {
|
if err := mustFlagString("name", cf.name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !isP11 {
|
if err := mustFlagString("out-key", cf.outKeyPath); err != nil {
|
||||||
if err = mustFlagString("out-key", cf.outKeyPath); err != nil {
|
return err
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
|
if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var kdfParams *cert.Argon2Parameters
|
var kdfParams *cert.Argon2Parameters
|
||||||
if !isP11 && *cf.encryption {
|
if *cf.encryption {
|
||||||
if kdfParams, err = parseArgonParameters(*cf.argonMemory, *cf.argonParallelism, *cf.argonIterations); err != nil {
|
if kdfParams, err = parseArgonParameters(*cf.argonMemory, *cf.argonParallelism, *cf.argonIterations); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -122,57 +106,44 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
version := cert.Version(*cf.version)
|
var ips []*net.IPNet
|
||||||
if version != cert.Version1 && version != cert.Version2 {
|
if *cf.ips != "" {
|
||||||
return newHelpErrorf("-version must be either %v or %v", cert.Version1, cert.Version2)
|
for _, rs := range strings.Split(*cf.ips, ",") {
|
||||||
}
|
|
||||||
|
|
||||||
var networks []netip.Prefix
|
|
||||||
if *cf.networks == "" && *cf.ips != "" {
|
|
||||||
// Pull up deprecated -ips flag if needed
|
|
||||||
*cf.networks = *cf.ips
|
|
||||||
}
|
|
||||||
|
|
||||||
if *cf.networks != "" {
|
|
||||||
for _, rs := range strings.Split(*cf.networks, ",") {
|
|
||||||
rs := strings.Trim(rs, " ")
|
rs := strings.Trim(rs, " ")
|
||||||
if rs != "" {
|
if rs != "" {
|
||||||
n, err := netip.ParsePrefix(rs)
|
ip, ipNet, err := net.ParseCIDR(rs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return newHelpErrorf("invalid -networks definition: %s", rs)
|
return newHelpErrorf("invalid ip definition: %s", err)
|
||||||
}
|
}
|
||||||
if version == cert.Version1 && !n.Addr().Is4() {
|
if ip.To4() == nil {
|
||||||
return newHelpErrorf("invalid -networks definition: v1 certificates can only be ipv4, have %s", rs)
|
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", rs)
|
||||||
}
|
}
|
||||||
networks = append(networks, n)
|
|
||||||
|
ipNet.IP = ip
|
||||||
|
ips = append(ips, ipNet)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var unsafeNetworks []netip.Prefix
|
var subnets []*net.IPNet
|
||||||
if *cf.unsafeNetworks == "" && *cf.subnets != "" {
|
if *cf.subnets != "" {
|
||||||
// Pull up deprecated -subnets flag if needed
|
for _, rs := range strings.Split(*cf.subnets, ",") {
|
||||||
*cf.unsafeNetworks = *cf.subnets
|
|
||||||
}
|
|
||||||
|
|
||||||
if *cf.unsafeNetworks != "" {
|
|
||||||
for _, rs := range strings.Split(*cf.unsafeNetworks, ",") {
|
|
||||||
rs := strings.Trim(rs, " ")
|
rs := strings.Trim(rs, " ")
|
||||||
if rs != "" {
|
if rs != "" {
|
||||||
n, err := netip.ParsePrefix(rs)
|
_, s, err := net.ParseCIDR(rs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return newHelpErrorf("invalid -unsafe-networks definition: %s", rs)
|
return newHelpErrorf("invalid subnet definition: %s", err)
|
||||||
}
|
}
|
||||||
if version == cert.Version1 && !n.Addr().Is4() {
|
if s.IP.To4() == nil {
|
||||||
return newHelpErrorf("invalid -unsafe-networks definition: v1 certificates can only be ipv4, have %s", rs)
|
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
|
||||||
}
|
}
|
||||||
unsafeNetworks = append(unsafeNetworks, n)
|
subnets = append(subnets, s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var passphrase []byte
|
var passphrase []byte
|
||||||
if !isP11 && *cf.encryption {
|
if *cf.encryption {
|
||||||
for i := 0; i < 5; i++ {
|
for i := 0; i < 5; i++ {
|
||||||
out.Write([]byte("Enter passphrase: "))
|
out.Write([]byte("Enter passphrase: "))
|
||||||
passphrase, err = pr.ReadPassword()
|
passphrase, err = pr.ReadPassword()
|
||||||
@ -195,109 +166,74 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||||||
|
|
||||||
var curve cert.Curve
|
var curve cert.Curve
|
||||||
var pub, rawPriv []byte
|
var pub, rawPriv []byte
|
||||||
var p11Client *pkclient.PKClient
|
switch *cf.curve {
|
||||||
|
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||||
if isP11 {
|
curve = cert.Curve_CURVE25519
|
||||||
switch *cf.curve {
|
pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
|
||||||
case "P256":
|
|
||||||
curve = cert.Curve_P256
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid curve for PKCS#11: %s", *cf.curve)
|
|
||||||
}
|
|
||||||
|
|
||||||
p11Client, err = pkclient.FromUrl(*cf.p11url)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while creating PKCS#11 client: %w", err)
|
+ return fmt.Errorf("error while generating ed25519 keys: %s", err)
  }
- defer func(client *pkclient.PKClient) {
- _ = client.Close()
- }(p11Client)
- pub, err = p11Client.GetPubKey()
+ case "P256":
+ var key *ecdsa.PrivateKey
+ curve = cert.Curve_P256
+ key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
  if err != nil {
- return fmt.Errorf("error while getting public key with PKCS#11: %w", err)
+ return fmt.Errorf("error while generating ecdsa keys: %s", err)
  }
- } else {
- switch *cf.curve {
- case "25519", "X25519", "Curve25519", "CURVE25519":
- curve = cert.Curve_CURVE25519
- pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
- if err != nil {
- return fmt.Errorf("error while generating ed25519 keys: %s", err)
- }
- case "P256":
- var key *ecdsa.PrivateKey
- curve = cert.Curve_P256
- key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- return fmt.Errorf("error while generating ecdsa keys: %s", err)
- }

  // ecdh.PrivateKey lets us get at the encoded bytes, even though
  // we aren't using ECDH here.
  eKey, err := key.ECDH()
  if err != nil {
  return fmt.Errorf("error while converting ecdsa key: %s", err)
  }
- }
- rawPriv = eKey.Bytes()
- pub = eKey.PublicKey().Bytes()
- default:
- return fmt.Errorf("invalid curve: %s", *cf.curve)
  }
+ rawPriv = eKey.Bytes()
+ pub = eKey.PublicKey().Bytes()
  }

- t := &cert.TBSCertificate{
- Version: version,
+ nc := cert.NebulaCertificate{
+ Details: cert.NebulaCertificateDetails{
  Name: *cf.name,
  Groups: groups,
- Networks: networks,
- UnsafeNetworks: unsafeNetworks,
+ Ips: ips,
+ Subnets: subnets,
  NotBefore: time.Now(),
  NotAfter: time.Now().Add(*cf.duration),
  PublicKey: pub,
  IsCA: true,
  Curve: curve,
+ },
  }

- if !isP11 {
- if _, err := os.Stat(*cf.outKeyPath); err == nil {
- return fmt.Errorf("refusing to overwrite existing CA key: %s", *cf.outKeyPath)
- }
+ if _, err := os.Stat(*cf.outKeyPath); err == nil {
+ return fmt.Errorf("refusing to overwrite existing CA key: %s", *cf.outKeyPath)
  }

  if _, err := os.Stat(*cf.outCertPath); err == nil {
  return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath)
  }

- var c cert.Certificate
- var b []byte
- if isP11 {
- c, err = t.SignWith(nil, curve, p11Client.SignASN1)
- if err != nil {
- return fmt.Errorf("error while signing with PKCS#11: %w", err)
- }
- } else {
- c, err = t.Sign(nil, curve, rawPriv)
- if err != nil {
- return fmt.Errorf("error while signing: %s", err)
- }
-
- if *cf.encryption {
- b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
- if err != nil {
- return fmt.Errorf("error while encrypting out-key: %s", err)
- }
- } else {
- b = cert.MarshalSigningPrivateKeyToPEM(curve, rawPriv)
- }
-
- err = os.WriteFile(*cf.outKeyPath, b, 0600)
- if err != nil {
- return fmt.Errorf("error while writing out-key: %s", err)
- }
+ err = nc.Sign(curve, rawPriv)
+ if err != nil {
+ return fmt.Errorf("error while signing: %s", err)
  }

- b, err = c.MarshalPEM()
+ var b []byte
+ if *cf.encryption {
+ b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
+ if err != nil {
+ return fmt.Errorf("error while encrypting out-key: %s", err)
+ }
+ } else {
+ b = cert.MarshalSigningPrivateKey(curve, rawPriv)
+ }
+
+ err = os.WriteFile(*cf.outKeyPath, b, 0600)
+ if err != nil {
+ return fmt.Errorf("error while writing out-key: %s", err)
+ }
+
+ b, err = nc.MarshalToPEM()
  if err != nil {
  return fmt.Errorf("error while marshalling certificate: %s", err)
  }
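A note on the curve handling in the hunk above: both sides of the diff lean on the Go standard library, where Ed25519 keys come back as raw byte slices and P-256 keys are generated with crypto/ecdsa and converted through crypto/ecdh to obtain fixed-width encodings. A minimal, self-contained sketch of that stdlib pattern (not the nebula-cert code itself; the helper name generateKeypair is made up for illustration):

package main

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"
)

// generateKeypair mirrors the curve switch at a high level: 25519 uses
// ed25519.GenerateKey directly, while P256 generates an ECDSA key and exports
// its ECDH encoding so both curves yield plain byte slices.
func generateKeypair(curve string) (pub, priv []byte, err error) {
	switch curve {
	case "25519":
		edPub, edPriv, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return nil, nil, fmt.Errorf("error while generating ed25519 keys: %w", err)
		}
		return edPub, edPriv, nil
	case "P256":
		key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			return nil, nil, fmt.Errorf("error while generating ecdsa keys: %w", err)
		}
		// ecdh.PrivateKey exposes the encoded bytes even though ECDH itself is not used here.
		eKey, err := key.ECDH()
		if err != nil {
			return nil, nil, fmt.Errorf("error while converting ecdsa key: %w", err)
		}
		return eKey.PublicKey().Bytes(), eKey.Bytes(), nil
	default:
		return nil, nil, fmt.Errorf("invalid curve: %s", curve)
	}
}

func main() {
	pub, priv, err := generateKeypair("P256")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pub: %d bytes, priv: %d bytes\n", len(pub), len(priv))
}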
@@ -14,9 +14,10 @@ import (

  "github.com/slackhq/nebula/cert"
  "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
  )

+ //TODO: test file permissions

  func Test_caSummary(t *testing.T) {
  assert.Equal(t, "ca <flags>: create a self signed certificate authority", caSummary())
  }
@@ -42,24 +43,17 @@ func Test_caHelp(t *testing.T) {
  " -groups string\n"+
  " \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
  " -ips string\n"+
- " Deprecated, see -networks\n"+
+ " \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses\n"+
  " -name string\n"+
  " \tRequired: name of the certificate authority\n"+
- " -networks string\n"+
- " \tOptional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in networks\n"+
  " -out-crt string\n"+
  " \tOptional: path to write the certificate to (default \"ca.crt\")\n"+
  " -out-key string\n"+
  " \tOptional: path to write the private key to (default \"ca.key\")\n"+
  " -out-qr string\n"+
  " \tOptional: output a qr code image (png) of the certificate\n"+
- optionalPkcs11String(" -pkcs11 string\n \tOptional: PKCS#11 URI to an existing private key\n")+
  " -subnets string\n"+
- " \tDeprecated, see -unsafe-networks\n"+
- " -unsafe-networks string\n"+
- " \tOptional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in unsafe networks\n"+
- " -version uint\n"+
- " \tOptional: version of the certificate format to use (default 2)\n",
+ " \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets\n",
  ob.String(),
  )
  }
@@ -88,94 +82,93 @@ func Test_ca(t *testing.T) {

  // required args
  assertHelpError(t, ca(
- []string{"-version", "1", "-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
+ []string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
  ), "-name is required")
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // ipv4 only ips
- assertHelpError(t, ca([]string{"-version", "1", "-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid -networks definition: v1 certificates can only be ipv4, have 100::100/100")
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // ipv4 only subnets
- assertHelpError(t, ca([]string{"-version", "1", "-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid -unsafe-networks definition: v1 certificates can only be ipv4, have 100::100/100")
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // failed key write
  ob.Reset()
  eb.Reset()
- args := []string{"-version", "1", "-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
- require.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
+ assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // create temp key file
  keyF, err := os.CreateTemp("", "test.key")
- require.NoError(t, err)
- require.NoError(t, os.Remove(keyF.Name()))
+ assert.Nil(t, err)
+ os.Remove(keyF.Name())

  // failed cert write
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
- require.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
+ assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // create temp cert file
  crtF, err := os.CreateTemp("", "test.crt")
- require.NoError(t, err)
- require.NoError(t, os.Remove(crtF.Name()))
- require.NoError(t, os.Remove(keyF.Name()))
+ assert.Nil(t, err)
+ os.Remove(crtF.Name())
+ os.Remove(keyF.Name())

  // test proper cert with removed empty groups and subnets
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.NoError(t, ca(args, ob, eb, nopw))
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.Nil(t, ca(args, ob, eb, nopw))
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // read cert and key files
  rb, _ := os.ReadFile(keyF.Name())
- lKey, b, c, err := cert.UnmarshalSigningPrivateKeyFromPEM(rb)
- assert.Equal(t, cert.Curve_CURVE25519, c)
- assert.Empty(t, b)
- require.NoError(t, err)
+ lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
+ assert.Len(t, b, 0)
+ assert.Nil(t, err)
  assert.Len(t, lKey, 64)

  rb, _ = os.ReadFile(crtF.Name())
- lCrt, b, err := cert.UnmarshalCertificateFromPEM(rb)
- assert.Empty(t, b)
- require.NoError(t, err)
+ lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
+ assert.Len(t, b, 0)
+ assert.Nil(t, err)

- assert.Equal(t, "test", lCrt.Name())
- assert.Empty(t, lCrt.Networks())
- assert.True(t, lCrt.IsCA())
- assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Groups())
- assert.Empty(t, lCrt.UnsafeNetworks())
- assert.Len(t, lCrt.PublicKey(), 32)
- assert.Equal(t, time.Duration(time.Minute*100), lCrt.NotAfter().Sub(lCrt.NotBefore()))
- assert.Empty(t, lCrt.Issuer())
- assert.True(t, lCrt.CheckSignature(lCrt.PublicKey()))
+ assert.Equal(t, "test", lCrt.Details.Name)
+ assert.Len(t, lCrt.Details.Ips, 0)
+ assert.True(t, lCrt.Details.IsCA)
+ assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Details.Groups)
+ assert.Len(t, lCrt.Details.Subnets, 0)
+ assert.Len(t, lCrt.Details.PublicKey, 32)
+ assert.Equal(t, time.Duration(time.Minute*100), lCrt.Details.NotAfter.Sub(lCrt.Details.NotBefore))
+ assert.Equal(t, "", lCrt.Details.Issuer)
+ assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey))

  // test encrypted key
  os.Remove(keyF.Name())
  os.Remove(crtF.Name())
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.NoError(t, ca(args, ob, eb, testpw))
+ args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.Nil(t, ca(args, ob, eb, testpw))
  assert.Equal(t, pwPromptOb, ob.String())
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", eb.String())

  // read encrypted key file and verify default params
  rb, _ = os.ReadFile(keyF.Name())
  k, _ := pem.Decode(rb)
  ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
- require.NoError(t, err)
+ assert.Nil(t, err)
  // we won't know salt in advance, so just check start of string
  assert.Equal(t, uint32(2*1024*1024), ned.EncryptionMetadata.Argon2Parameters.Memory)
  assert.Equal(t, uint8(4), ned.EncryptionMetadata.Argon2Parameters.Parallelism)
@@ -185,8 +178,8 @@ func Test_ca(t *testing.T) {
  var curve cert.Curve
  curve, lKey, b, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rb)
  assert.Equal(t, cert.Curve_CURVE25519, curve)
- require.NoError(t, err)
- assert.Empty(t, b)
+ assert.Nil(t, err)
+ assert.Len(t, b, 0)
  assert.Len(t, lKey, 64)

  // test when reading passsword results in an error
@@ -194,45 +187,45 @@ func Test_ca(t *testing.T) {
  os.Remove(crtF.Name())
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.Error(t, ca(args, ob, eb, errpw))
+ args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.Error(t, ca(args, ob, eb, errpw))
  assert.Equal(t, pwPromptOb, ob.String())
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", eb.String())

  // test when user fails to enter a password
  os.Remove(keyF.Name())
  os.Remove(crtF.Name())
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
+ args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
  assert.Equal(t, strings.Repeat(pwPromptOb, 5), ob.String()) // prompts 5 times before giving up
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", eb.String())

  // create valid cert/key for overwrite tests
  os.Remove(keyF.Name())
  os.Remove(crtF.Name())
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.NoError(t, ca(args, ob, eb, nopw))
+ args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.Nil(t, ca(args, ob, eb, nopw))

  // test that we won't overwrite existing certificate file
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // test that we won't overwrite existing key file
  os.Remove(keyF.Name())
  ob.Reset()
  eb.Reset()
- args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
- require.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
+ assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())
  os.Remove(keyF.Name())

  }
@@ -6,8 +6,6 @@ import (
  "io"
  "os"

- "github.com/slackhq/nebula/pkclient"
-
  "github.com/slackhq/nebula/cert"
  )
@@ -15,8 +13,8 @@ type keygenFlags struct {
  set *flag.FlagSet
  outKeyPath *string
  outPubPath *string
  curve *string
- p11url *string
  }

  func newKeygenFlags() *keygenFlags {
@@ -25,7 +23,6 @@ func newKeygenFlags() *keygenFlags {
  cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to")
  cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to")
  cf.curve = cf.set.String("curve", "25519", "ECDH Curve (25519, P256)")
- cf.p11url = p11Flag(cf.set)
  return &cf
  }
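The keygenFlags changes above are plain flag.FlagSet plumbing. A small standalone sketch of the same pattern, with hypothetical names (exampleFlags is not part of the repository):

package main

import (
	"flag"
	"fmt"
)

type exampleFlags struct {
	set        *flag.FlagSet
	outKeyPath *string
	curve      *string
}

func newExampleFlags() *exampleFlags {
	// ContinueOnError lets the caller decide how to report parse failures.
	ef := exampleFlags{set: flag.NewFlagSet("keygen", flag.ContinueOnError)}
	ef.outKeyPath = ef.set.String("out-key", "", "Required: path to write the private key to")
	ef.curve = ef.set.String("curve", "25519", "ECDH Curve (25519, P256)")
	return &ef
}

func main() {
	ef := newExampleFlags()
	if err := ef.set.Parse([]string{"-out-key", "example.key", "-curve", "P256"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*ef.outKeyPath, *ef.curve)
}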
@@ -36,58 +33,32 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
  return err
  }

- isP11 := len(*cf.p11url) > 0
-
- if !isP11 {
- if err = mustFlagString("out-key", cf.outKeyPath); err != nil {
- return err
- }
+ if err := mustFlagString("out-key", cf.outKeyPath); err != nil {
+ return err
  }
- if err = mustFlagString("out-pub", cf.outPubPath); err != nil {
+ if err := mustFlagString("out-pub", cf.outPubPath); err != nil {
  return err
  }

  var pub, rawPriv []byte
  var curve cert.Curve
- if isP11 {
- switch *cf.curve {
- case "P256":
- curve = cert.Curve_P256
- default:
- return fmt.Errorf("invalid curve for PKCS#11: %s", *cf.curve)
- }
- } else {
- switch *cf.curve {
- case "25519", "X25519", "Curve25519", "CURVE25519":
- pub, rawPriv = x25519Keypair()
- curve = cert.Curve_CURVE25519
- case "P256":
- pub, rawPriv = p256Keypair()
- curve = cert.Curve_P256
- default:
- return fmt.Errorf("invalid curve: %s", *cf.curve)
- }
+ switch *cf.curve {
+ case "25519", "X25519", "Curve25519", "CURVE25519":
+ pub, rawPriv = x25519Keypair()
+ curve = cert.Curve_CURVE25519
+ case "P256":
+ pub, rawPriv = p256Keypair()
+ curve = cert.Curve_P256
+ default:
+ return fmt.Errorf("invalid curve: %s", *cf.curve)
  }

- if isP11 {
- p11Client, err := pkclient.FromUrl(*cf.p11url)
- if err != nil {
- return fmt.Errorf("error while creating PKCS#11 client: %w", err)
- }
- defer func(client *pkclient.PKClient) {
- _ = client.Close()
- }(p11Client)
- pub, err = p11Client.GetPubKey()
- if err != nil {
- return fmt.Errorf("error while getting public key: %w", err)
- }
- } else {
- err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKeyToPEM(curve, rawPriv), 0600)
- if err != nil {
- return fmt.Errorf("error while writing out-key: %s", err)
- }
+ err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
+ if err != nil {
+ return fmt.Errorf("error while writing out-key: %s", err)
  }
- err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKeyToPEM(curve, pub), 0600)
+ err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
  if err != nil {
  return fmt.Errorf("error while writing out-pub: %s", err)
  }
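x25519Keypair and p256Keypair are helpers defined elsewhere in this package; the hunk above only changes which marshaling function their output is passed to. One way to produce equivalent raw keypairs with nothing but the standard library is crypto/ecdh. A sketch under that assumption, not necessarily how the helpers are actually implemented:

package main

import (
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
	"log"
)

// rawKeypair returns the raw public and private key bytes for the given ECDH curve.
func rawKeypair(curve ecdh.Curve) (pub, priv []byte, err error) {
	key, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	return key.PublicKey().Bytes(), key.Bytes(), nil
}

func main() {
	xPub, xPriv, err := rawKeypair(ecdh.X25519())
	if err != nil {
		log.Fatal(err)
	}
	pPub, pPriv, err := rawKeypair(ecdh.P256())
	if err != nil {
		log.Fatal(err)
	}
	// X25519 keys are 32 bytes; P-256 public keys are 65 bytes (uncompressed point).
	fmt.Println(len(xPub), len(xPriv), len(pPub), len(pPriv))
}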
@@ -101,7 +72,7 @@ func keygenSummary() string {

  func keygenHelp(out io.Writer) {
  cf := newKeygenFlags()
- _, _ = out.Write([]byte("Usage of " + os.Args[0] + " " + keygenSummary() + "\n"))
+ out.Write([]byte("Usage of " + os.Args[0] + " " + keygenSummary() + "\n"))
  cf.set.SetOutput(out)
  cf.set.PrintDefaults()
  }
@@ -7,9 +7,10 @@ import (

  "github.com/slackhq/nebula/cert"
  "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
  )

+ //TODO: test file permissions

  func Test_keygenSummary(t *testing.T) {
  assert.Equal(t, "keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`", keygenSummary())
  }
@@ -25,8 +26,7 @@ func Test_keygenHelp(t *testing.T) {
  " -out-key string\n"+
  " \tRequired: path to write the private key to\n"+
  " -out-pub string\n"+
- " \tRequired: path to write the public key to\n"+
- optionalPkcs11String(" -pkcs11 string\n \tOptional: PKCS#11 URI to an existing private key\n"),
+ " \tRequired: path to write the public key to\n",
  ob.String(),
  )
  }
@@ -37,59 +37,57 @@ func Test_keygen(t *testing.T) {

  // required args
  assertHelpError(t, keygen([]string{"-out-pub", "nope"}, ob, eb), "-out-key is required")
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  assertHelpError(t, keygen([]string{"-out-key", "nope"}, ob, eb), "-out-pub is required")
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // failed key write
  ob.Reset()
  eb.Reset()
  args := []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", "/do/not/write/pleasekey"}
- require.EqualError(t, keygen(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.EqualError(t, keygen(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // create temp key file
  keyF, err := os.CreateTemp("", "test.key")
- require.NoError(t, err)
+ assert.Nil(t, err)
  defer os.Remove(keyF.Name())

  // failed pub write
  ob.Reset()
  eb.Reset()
  args = []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", keyF.Name()}
- require.EqualError(t, keygen(args, ob, eb), "error while writing out-pub: open /do/not/write/pleasepub: "+NoSuchDirError)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.EqualError(t, keygen(args, ob, eb), "error while writing out-pub: open /do/not/write/pleasepub: "+NoSuchDirError)
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // create temp pub file
  pubF, err := os.CreateTemp("", "test.pub")
- require.NoError(t, err)
+ assert.Nil(t, err)
  defer os.Remove(pubF.Name())

  // test proper keygen
  ob.Reset()
  eb.Reset()
  args = []string{"-out-pub", pubF.Name(), "-out-key", keyF.Name()}
- require.NoError(t, keygen(args, ob, eb))
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.Nil(t, keygen(args, ob, eb))
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())

  // read cert and key files
  rb, _ := os.ReadFile(keyF.Name())
- lKey, b, curve, err := cert.UnmarshalPrivateKeyFromPEM(rb)
- assert.Equal(t, cert.Curve_CURVE25519, curve)
- assert.Empty(t, b)
- require.NoError(t, err)
+ lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
+ assert.Len(t, b, 0)
+ assert.Nil(t, err)
  assert.Len(t, lKey, 32)

  rb, _ = os.ReadFile(pubF.Name())
- lPub, b, curve, err := cert.UnmarshalPublicKeyFromPEM(rb)
- assert.Equal(t, cert.Curve_CURVE25519, curve)
- assert.Empty(t, b)
- require.NoError(t, err)
+ lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
+ assert.Len(t, b, 0)
+ assert.Nil(t, err)
  assert.Len(t, lPub, 32)
  }
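Much of the test churn in the hunks above is a swap between testify's require and assert families. The practical difference: require aborts the test on the first failure, while assert records the failure and keeps going. A tiny illustration (a hypothetical test, not one from this repository):

package main

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestTempFile(t *testing.T) {
	f, err := os.CreateTemp("", "example")
	// require: if CreateTemp failed there is no point continuing, so stop the test here.
	require.NoError(t, err)
	defer os.Remove(f.Name())

	// assert: record a failure but keep running the remaining checks.
	assert.NotEmpty(t, f.Name())
	assert.NoError(t, f.Close())
}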
@@ -17,7 +17,7 @@ func (he *helpError) Error() string {
  return he.s
  }

- func newHelpErrorf(s string, v ...any) error {
+ func newHelpErrorf(s string, v ...interface{}) error {
  return &helpError{s: fmt.Sprintf(s, v...)}
  }
@@ -3,15 +3,15 @@ package main
  import (
  "bytes"
  "errors"
- "fmt"
  "io"
  "os"
  "testing"

  "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
  )

+ //TODO: all flag parsing continueOnError will print to stderr on its own currently

  func Test_help(t *testing.T) {
  expected := "Usage of " + os.Args[0] + " <global flags> <mode>:\n" +
  " Global flags:\n" +
@@ -77,16 +77,8 @@ func assertHelpError(t *testing.T, err error, msg string) {
  case *helpError:
  // good
  default:
- t.Fatal(fmt.Sprintf("err was not a helpError: %q, expected %q", err, msg))
+ t.Fatal("err was not a helpError")
  }

- require.EqualError(t, err, msg)
- }
-
- func optionalPkcs11String(msg string) string {
- if p11Supported() {
- return msg
- } else {
- return ""
- }
+ assert.EqualError(t, err, msg)
  }
@@ -1,15 +0,0 @@
- //go:build cgo && pkcs11
-
- package main
-
- import (
- "flag"
- )
-
- func p11Supported() bool {
- return true
- }
-
- func p11Flag(set *flag.FlagSet) *string {
- return set.String("pkcs11", "", "Optional: PKCS#11 URI to an existing private key")
- }
@@ -1,16 +0,0 @@
- //go:build !cgo || !pkcs11
-
- package main
-
- import (
- "flag"
- )
-
- func p11Supported() bool {
- return false
- }
-
- func p11Flag(set *flag.FlagSet) *string {
- var ret = ""
- return &ret
- }
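The two deleted files above are the usual Go build-tag pair: a real implementation compiled only when cgo and the pkcs11 tag are available, and a stub otherwise, so callers can use p11Supported and p11Flag unconditionally. The same pattern in miniature, with a hypothetical feature name ("featurex" is invented for illustration):

// feature_enabled.go (compiled only with cgo and the "featurex" build tag)
//go:build cgo && featurex

package main

// featureXSupported reports that the optional backend was compiled in.
func featureXSupported() bool { return true }

// feature_disabled.go (compiled everywhere else)
//go:build !cgo || !featurex

package main

// featureXSupported is the stub used when the optional backend is unavailable.
func featureXSupported() bool { return false }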
@@ -45,27 +45,28 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
  return fmt.Errorf("unable to read cert; %s", err)
  }

- var c cert.Certificate
+ var c *cert.NebulaCertificate
  var qrBytes []byte
  part := 0

- var jsonCerts []cert.Certificate
-
  for {
- c, rawCert, err = cert.UnmarshalCertificateFromPEM(rawCert)
+ c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert)
  if err != nil {
  return fmt.Errorf("error while unmarshaling cert: %s", err)
  }

  if *pf.json {
- jsonCerts = append(jsonCerts, c)
+ b, _ := json.Marshal(c)
+ out.Write(b)
+ out.Write([]byte("\n"))
  } else {
- _, _ = out.Write([]byte(c.String()))
- _, _ = out.Write([]byte("\n"))
+ out.Write([]byte(c.String()))
+ out.Write([]byte("\n"))
  }

  if *pf.outQRPath != "" {
- b, err := c.MarshalPEM()
+ b, err := c.MarshalToPEM()
  if err != nil {
  return fmt.Errorf("error while marshalling cert to PEM: %s", err)
  }
@@ -79,12 +80,6 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
  part++
  }

- if *pf.json {
- b, _ := json.Marshal(jsonCerts)
- _, _ = out.Write(b)
- _, _ = out.Write([]byte("\n"))
- }
-
  if *pf.outQRPath != "" {
  b, err := qrcode.Encode(string(qrBytes), qrcode.Medium, -5)
  if err != nil {
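The -json change above replaces one json.Marshal call per certificate (one JSON document per line) with a slice that is marshaled once, producing a single valid JSON array. A standalone sketch of the difference using only encoding/json (certSummary is an invented type for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type certSummary struct {
	Name   string   `json:"name"`
	Groups []string `json:"groups"`
}

func main() {
	certs := []certSummary{{Name: "a", Groups: []string{"hi"}}, {Name: "b"}}

	// Streaming: one JSON object per line, as the per-item approach does.
	for _, c := range certs {
		b, _ := json.Marshal(c)
		os.Stdout.Write(b)
		os.Stdout.Write([]byte("\n"))
	}

	// Accumulated: a single JSON array that any JSON reader can parse in one pass.
	b, _ := json.Marshal(certs)
	fmt.Println(string(b))
}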
@@ -2,17 +2,12 @@ package main

  import (
  "bytes"
- "crypto/ed25519"
- "crypto/rand"
- "encoding/hex"
- "net/netip"
  "os"
  "testing"
  "time"

  "github.com/slackhq/nebula/cert"
  "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
  )

  func Test_printSummary(t *testing.T) {
@@ -43,203 +38,84 @@ func Test_printCert(t *testing.T) {

  // no path
  err := printCert([]string{}, ob, eb)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())
  assertHelpError(t, err, "-path is required")

  // no cert at path
  ob.Reset()
  eb.Reset()
  err = printCert([]string{"-path", "does_not_exist"}, ob, eb)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
- require.EqualError(t, err, "unable to read cert; open does_not_exist: "+NoSuchFileError)
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())
+ assert.EqualError(t, err, "unable to read cert; open does_not_exist: "+NoSuchFileError)

  // invalid cert at path
  ob.Reset()
  eb.Reset()
  tf, err := os.CreateTemp("", "print-cert")
- require.NoError(t, err)
+ assert.Nil(t, err)
  defer os.Remove(tf.Name())

  tf.WriteString("-----BEGIN NOPE-----")
  err = printCert([]string{"-path", tf.Name()}, ob, eb)
- assert.Empty(t, ob.String())
- assert.Empty(t, eb.String())
- require.EqualError(t, err, "error while unmarshaling cert: input did not contain a valid PEM encoded block")
+ assert.Equal(t, "", ob.String())
+ assert.Equal(t, "", eb.String())
+ assert.EqualError(t, err, "error while unmarshaling cert: input did not contain a valid PEM encoded block")

  // test multiple certs
  ob.Reset()
  eb.Reset()
  tf.Truncate(0)
  tf.Seek(0, 0)
- ca, caKey := NewTestCaCert("test ca", nil, nil, time.Time{}, time.Time{}, nil, nil, nil)
- c, _ := NewTestCert(ca, caKey, "test", time.Time{}, time.Time{}, []netip.Prefix{netip.MustParsePrefix("10.0.0.123/8")}, nil, []string{"hi"})
+ c := cert.NebulaCertificate{
+ Details: cert.NebulaCertificateDetails{
+ Name: "test",
+ Groups: []string{"hi"},
+ PublicKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
+ },
+ Signature: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
+ }

- p, _ := c.MarshalPEM()
+ p, _ := c.MarshalToPEM()
  tf.Write(p)
  tf.Write(p)
  tf.Write(p)

  err = printCert([]string{"-path", tf.Name()}, ob, eb)
- fp, _ := c.Fingerprint()
- pk := hex.EncodeToString(c.PublicKey())
- sig := hex.EncodeToString(c.Signature())
- require.NoError(t, err)
+ assert.Nil(t, err)
  assert.Equal(
  t,
//"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: "+c.Issuer()+"\n\t\tPublic key: "+pk+"\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: "+fp+"\n\tSignature: "+sig+"\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: "+c.Issuer()+"\n\t\tPublic key: "+pk+"\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: "+fp+"\n\tSignature: "+sig+"\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: "+c.Issuer()+"\n\t\tPublic key: "+pk+"\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: "+fp+"\n\tSignature: "+sig+"\n}\n",
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
- `{
- "details": {
- "curve": "CURVE25519",
- "groups": [
- "hi"
- ],
- "isCa": false,
- "issuer": "`+c.Issuer()+`",
- "name": "test",
- "networks": [
- "10.0.0.123/8"
- ],
- "notAfter": "0001-01-01T00:00:00Z",
- "notBefore": "0001-01-01T00:00:00Z",
- "publicKey": "`+pk+`",
- "unsafeNetworks": []
- },
- "fingerprint": "`+fp+`",
- "signature": "`+sig+`",
- "version": 1
- }
- {
- "details": {
- "curve": "CURVE25519",
- "groups": [
- "hi"
- ],
- "isCa": false,
- "issuer": "`+c.Issuer()+`",
- "name": "test",
- "networks": [
- "10.0.0.123/8"
- ],
- "notAfter": "0001-01-01T00:00:00Z",
- "notBefore": "0001-01-01T00:00:00Z",
- "publicKey": "`+pk+`",
- "unsafeNetworks": []
- },
- "fingerprint": "`+fp+`",
- "signature": "`+sig+`",
- "version": 1
- }
- {
- "details": {
- "curve": "CURVE25519",
- "groups": [
- "hi"
- ],
- "isCa": false,
- "issuer": "`+c.Issuer()+`",
- "name": "test",
- "networks": [
- "10.0.0.123/8"
- ],
- "notAfter": "0001-01-01T00:00:00Z",
- "notBefore": "0001-01-01T00:00:00Z",
- "publicKey": "`+pk+`",
- "unsafeNetworks": []
- },
- "fingerprint": "`+fp+`",
- "signature": "`+sig+`",
- "version": 1
- }
- `,
  ob.String(),
  )
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", eb.String())

  // test json
  ob.Reset()
  eb.Reset()
  tf.Truncate(0)
  tf.Seek(0, 0)
+ c = cert.NebulaCertificate{
+ Details: cert.NebulaCertificateDetails{
+ Name: "test",
+ Groups: []string{"hi"},
+ PublicKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
+ },
+ Signature: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
+ }
+
+ p, _ = c.MarshalToPEM()
  tf.Write(p)
  tf.Write(p)
  tf.Write(p)

  err = printCert([]string{"-json", "-path", tf.Name()}, ob, eb)
- fp, _ = c.Fingerprint()
- pk = hex.EncodeToString(c.PublicKey())
- sig = hex.EncodeToString(c.Signature())
- require.NoError(t, err)
+ assert.Nil(t, err)
  assert.Equal(
  t,
`[{"details":{"curve":"CURVE25519","groups":["hi"],"isCa":false,"issuer":"`+c.Issuer()+`","name":"test","networks":["10.0.0.123/8"],"notAfter":"0001-01-01T00:00:00Z","notBefore":"0001-01-01T00:00:00Z","publicKey":"`+pk+`","unsafeNetworks":[]},"fingerprint":"`+fp+`","signature":"`+sig+`","version":1},{"details":{"curve":"CURVE25519","groups":["hi"],"isCa":false,"issuer":"`+c.Issuer()+`","name":"test","networks":["10.0.0.123/8"],"notAfter":"0001-01-01T00:00:00Z","notBefore":"0001-01-01T00:00:00Z","publicKey":"`+pk+`","unsafeNetworks":[]},"fingerprint":"`+fp+`","signature":"`+sig+`","version":1},{"details":{"curve":"CURVE25519","groups":["hi"],"isCa":false,"issuer":"`+c.Issuer()+`","name":"test","networks":["10.0.0.123/8"],"notAfter":"0001-01-01T00:00:00Z","notBefore":"0001-01-01T00:00:00Z","publicKey":"`+pk+`","unsafeNetworks":[]},"fingerprint":"`+fp+`","signature":"`+sig+`","version":1}]
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
- `,
  ob.String(),
  )
- assert.Empty(t, eb.String())
+ assert.Equal(t, "", eb.String())
- }
-
- // NewTestCaCert will generate a CA cert
- func NewTestCaCert(name string, pubKey, privKey []byte, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte) {
- var err error
- if pubKey == nil || privKey == nil {
- pubKey, privKey, err = ed25519.GenerateKey(rand.Reader)
- if err != nil {
- panic(err)
- }
- }
-
- t := &cert.TBSCertificate{
- Version: cert.Version1,
- Name: name,
- NotBefore: time.Unix(before.Unix(), 0),
- NotAfter: time.Unix(after.Unix(), 0),
- PublicKey: pubKey,
- Networks: networks,
- UnsafeNetworks: unsafeNetworks,
- Groups: groups,
- IsCA: true,
- }
-
- c, err := t.Sign(nil, cert.Curve_CURVE25519, privKey)
- if err != nil {
- panic(err)
- }
-
- return c, privKey
- }
-
- func NewTestCert(ca cert.Certificate, signerKey []byte, name string, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte) {
- if before.IsZero() {
- before = ca.NotBefore()
- }
-
- if after.IsZero() {
- after = ca.NotAfter()
- }
-
- if len(networks) == 0 {
- networks = []netip.Prefix{netip.MustParsePrefix("10.0.0.123/8")}
- }
-
- pub, rawPriv := x25519Keypair()
- nc := &cert.TBSCertificate{
- Version: cert.Version1,
- Name: name,
- Networks: networks,
- UnsafeNetworks: unsafeNetworks,
- Groups: groups,
- NotBefore: time.Unix(before.Unix(), 0),
- NotAfter: time.Unix(after.Unix(), 0),
- PublicKey: pub,
- IsCA: false,
- }
-
- c, err := nc.Sign(ca, ca.Curve(), signerKey)
- if err != nil {
- panic(err)
- }
-
- return c, rawPriv
  }
@@ -3,63 +3,50 @@ package main
  import (
  "crypto/ecdh"
  "crypto/rand"
- "errors"
  "flag"
  "fmt"
  "io"
- "net/netip"
+ "net"
  "os"
  "strings"
  "time"

  "github.com/skip2/go-qrcode"
  "github.com/slackhq/nebula/cert"
- "github.com/slackhq/nebula/pkclient"
  "golang.org/x/crypto/curve25519"
  )

  type signFlags struct {
  set *flag.FlagSet
- version *uint
  caKeyPath *string
  caCertPath *string
  name *string
- networks *string
- unsafeNetworks *string
+ ip *string
  duration *time.Duration
  inPubPath *string
  outKeyPath *string
  outCertPath *string
  outQRPath *string
  groups *string
-
- p11url *string
-
- // Deprecated options
- ip *string
- subnets *string
+ subnets *string
  }

  func newSignFlags() *signFlags {
  sf := signFlags{set: flag.NewFlagSet("sign", flag.ContinueOnError)}
  sf.set.Usage = func() {}
- sf.version = sf.set.Uint("version", 0, "Optional: version of the certificate format to use, the default is to create both v1 and v2 certificates.")
  sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key")
  sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert")
  sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname")
- sf.networks = sf.set.String("networks", "", "Required: comma separated list of ip address and network in CIDR notation to assign to this cert")
- sf.unsafeNetworks = sf.set.String("unsafe-networks", "", "Optional: comma separated list of ip address and network in CIDR notation. Unsafe networks this cert can route for")
+ sf.ip = sf.set.String("ip", "", "Required: ipv4 address and network in CIDR notation to assign the cert")
  sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
  sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key")
  sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to")
  sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to")
  sf.outQRPath = sf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
  sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups")
- sf.p11url = p11Flag(sf.set)
-
- sf.ip = sf.set.String("ip", "", "Deprecated, see -networks")
- sf.subnets = sf.set.String("subnets", "", "Deprecated, see -unsafe-networks")
+ sf.subnets = sf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for")
  return &sf

  }

  func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
@@ -69,12 +56,8 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
  return err
  }

- isP11 := len(*sf.p11url) > 0
-
- if !isP11 {
- if err := mustFlagString("ca-key", sf.caKeyPath); err != nil {
- return err
- }
+ if err := mustFlagString("ca-key", sf.caKeyPath); err != nil {
+ return err
  }
  if err := mustFlagString("ca-crt", sf.caCertPath); err != nil {
  return err
@@ -82,67 +65,50 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
  if err := mustFlagString("name", sf.name); err != nil {
  return err
  }
- if !isP11 && *sf.inPubPath != "" && *sf.outKeyPath != "" {
+ if err := mustFlagString("ip", sf.ip); err != nil {
+ return err
+ }
+ if *sf.inPubPath != "" && *sf.outKeyPath != "" {
  return newHelpErrorf("cannot set both -in-pub and -out-key")
  }

- var v4Networks []netip.Prefix
- var v6Networks []netip.Prefix
- if *sf.networks == "" && *sf.ip != "" {
- // Pull up deprecated -ip flag if needed
- *sf.networks = *sf.ip
- }
-
- if len(*sf.networks) == 0 {
- return newHelpErrorf("-networks is required")
- }
-
- version := cert.Version(*sf.version)
- if version != 0 && version != cert.Version1 && version != cert.Version2 {
- return newHelpErrorf("-version must be either %v or %v", cert.Version1, cert.Version2)
+ rawCAKey, err := os.ReadFile(*sf.caKeyPath)
+ if err != nil {
+ return fmt.Errorf("error while reading ca-key: %s", err)
  }

  var curve cert.Curve
  var caKey []byte

- if !isP11 {
- var rawCAKey []byte
- rawCAKey, err := os.ReadFile(*sf.caKeyPath)
- if err != nil {
- return fmt.Errorf("error while reading ca-key: %s", err)
- }
-
- // naively attempt to decode the private key as though it is not encrypted
- caKey, _, curve, err = cert.UnmarshalSigningPrivateKeyFromPEM(rawCAKey)
- if errors.Is(err, cert.ErrPrivateKeyEncrypted) {
- // ask for a passphrase until we get one
- var passphrase []byte
- for i := 0; i < 5; i++ {
- out.Write([]byte("Enter passphrase: "))
- passphrase, err = pr.ReadPassword()
-
- if errors.Is(err, ErrNoTerminal) {
- return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
- } else if err != nil {
- return fmt.Errorf("error reading password: %s", err)
- }
-
- if len(passphrase) > 0 {
- break
- }
- }
- if len(passphrase) == 0 {
- return fmt.Errorf("cannot open encrypted ca-key without passphrase")
- }
-
- curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
- if err != nil {
- return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
- }
- } else if err != nil {
- return fmt.Errorf("error while parsing ca-key: %s", err)
- }
- }
+ // naively attempt to decode the private key as though it is not encrypted
+ caKey, _, curve, err = cert.UnmarshalSigningPrivateKey(rawCAKey)
+ if err == cert.ErrPrivateKeyEncrypted {
+ // ask for a passphrase until we get one
+ var passphrase []byte
+ for i := 0; i < 5; i++ {
+ out.Write([]byte("Enter passphrase: "))
+ passphrase, err = pr.ReadPassword()
+
+ if err == ErrNoTerminal {
+ return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
+ } else if err != nil {
+ return fmt.Errorf("error reading password: %s", err)
+ }
+
+ if len(passphrase) > 0 {
+ break
+ }
+ }
+ if len(passphrase) == 0 {
+ return fmt.Errorf("cannot open encrypted ca-key without passphrase")
+ }
+
+ curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
+ if err != nil {
+ return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
+ }
+ } else if err != nil {
+ return fmt.Errorf("error while parsing ca-key: %s", err)
+ }

  rawCACert, err := os.ReadFile(*sf.caCertPath)
@@ -150,15 +116,18 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while reading ca-crt: %s", err)
}

caCert, _, err := cert.UnmarshalCertificateFromPEM(rawCACert)
caCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCACert)
if err != nil {
return fmt.Errorf("error while parsing ca-crt: %s", err)
}

if !isP11 {
if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
return fmt.Errorf("refusing to sign, root certificate does not match private key")
}
if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
return fmt.Errorf("refusing to sign, root certificate does not match private key")
}
issuer, err := caCert.Sha256Sum()
if err != nil {
return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err)
}

if caCert.Expired(time.Now()) {
@@ -167,53 +136,19 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)

// if no duration is given, expire one second before the root expires
if *sf.duration <= 0 {
*sf.duration = time.Until(caCert.NotAfter()) - time.Second*1
*sf.duration = time.Until(caCert.Details.NotAfter) - time.Second*1
}

if *sf.networks != "" {
for _, rs := range strings.Split(*sf.networks, ",") {
rs := strings.Trim(rs, " ")
if rs != "" {
n, err := netip.ParsePrefix(rs)
if err != nil {
return newHelpErrorf("invalid -networks definition: %s", rs)
}
ip, ipNet, err := net.ParseCIDR(*sf.ip)
if err != nil {
return newHelpErrorf("invalid ip definition: %s", err)

if n.Addr().Is4() {
v4Networks = append(v4Networks, n)
} else {
v6Networks = append(v6Networks, n)
}
}
}
}
var v4UnsafeNetworks []netip.Prefix
var v6UnsafeNetworks []netip.Prefix
if *sf.unsafeNetworks == "" && *sf.subnets != "" {
// Pull up deprecated -subnets flag if needed
*sf.unsafeNetworks = *sf.subnets
if ip.To4() == nil {
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", *sf.ip)
}
ipNet.IP = ip

if *sf.unsafeNetworks != "" {
for _, rs := range strings.Split(*sf.unsafeNetworks, ",") {
rs := strings.Trim(rs, " ")
if rs != "" {
n, err := netip.ParsePrefix(rs)
if err != nil {
return newHelpErrorf("invalid -unsafe-networks definition: %s", rs)
}
groups := []string{}

if n.Addr().Is4() {
v4UnsafeNetworks = append(v4UnsafeNetworks, n)
} else {
v6UnsafeNetworks = append(v6UnsafeNetworks, n)
}
}
}
}

var groups []string
if *sf.groups != "" {
for _, rg := range strings.Split(*sf.groups, ",") {
g := strings.TrimSpace(rg)
@@ -223,43 +158,60 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
}
}

var pub, rawPriv []byte
var p11Client *pkclient.PKClient
if isP11 {
curve = cert.Curve_P256
p11Client, err = pkclient.FromUrl(*sf.p11url)
subnets := []*net.IPNet{}
if *sf.subnets != "" {
for _, rs := range strings.Split(*sf.subnets, ",") {
rs := strings.Trim(rs, " ")
if rs != "" {
_, s, err := net.ParseCIDR(rs)
if err != nil {
return fmt.Errorf("error while creating PKCS#11 client: %w", err)
return newHelpErrorf("invalid subnet definition: %s", err)
}
if s.IP.To4() == nil {
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
}
subnets = append(subnets, s)
}
}
defer func(client *pkclient.PKClient) {
_ = client.Close()
}(p11Client)
}

var pub, rawPriv []byte
if *sf.inPubPath != "" {
var pubCurve cert.Curve
rawPub, err := os.ReadFile(*sf.inPubPath)
if err != nil {
return fmt.Errorf("error while reading in-pub: %s", err)
}
var pubCurve cert.Curve
pub, _, pubCurve, err = cert.UnmarshalPublicKeyFromPEM(rawPub)
pub, _, pubCurve, err = cert.UnmarshalPublicKey(rawPub)
if err != nil {
return fmt.Errorf("error while parsing in-pub: %s", err)
}
if pubCurve != curve {
return fmt.Errorf("curve of in-pub does not match ca")
}
} else if isP11 {
pub, err = p11Client.GetPubKey()
if err != nil {
return fmt.Errorf("error while getting public key with PKCS#11: %w", err)
}
} else {
pub, rawPriv = newKeypair(curve)
}

nc := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: *sf.name,
Ips: []*net.IPNet{ipNet},
Groups: groups,
Subnets: subnets,
NotBefore: time.Now(),
NotAfter: time.Now().Add(*sf.duration),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
Curve: curve,
},
}

if err := nc.CheckRootConstrains(caCert); err != nil {
return fmt.Errorf("refusing to sign, root certificate constraints violated: %s", err)
}

if *sf.outKeyPath == "" {
*sf.outKeyPath = *sf.name + ".key"
}
@@ -272,105 +224,25 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath)
}

var crts []cert.Certificate
notBefore := time.Now()
notAfter := notBefore.Add(*sf.duration)
err = nc.Sign(curve, caKey)
if err != nil {
return fmt.Errorf("error while signing: %s", err)

if version == 0 || version == cert.Version1 {
// Make sure we at least have an ip
if len(v4Networks) != 1 {
return newHelpErrorf("invalid -networks definition: v1 certificates can only have a single ipv4 address")
}

if version == cert.Version1 {
// If we are asked to mint a v1 certificate only then we cant just ignore any v6 addresses
if len(v6Networks) > 0 {
return newHelpErrorf("invalid -networks definition: v1 certificates can only be ipv4")
}

if len(v6UnsafeNetworks) > 0 {
return newHelpErrorf("invalid -unsafe-networks definition: v1 certificates can only be ipv4")
}
}

t := &cert.TBSCertificate{
Version: cert.Version1,
Name: *sf.name,
Networks: []netip.Prefix{v4Networks[0]},
Groups: groups,
UnsafeNetworks: v4UnsafeNetworks,
NotBefore: notBefore,
NotAfter: notAfter,
PublicKey: pub,
IsCA: false,
Curve: curve,
}

var nc cert.Certificate
if p11Client == nil {
nc, err = t.Sign(caCert, curve, caKey)
if err != nil {
return fmt.Errorf("error while signing: %w", err)
}
} else {
nc, err = t.SignWith(caCert, curve, p11Client.SignASN1)
if err != nil {
return fmt.Errorf("error while signing with PKCS#11: %w", err)
}
}

crts = append(crts, nc)
}

if version == 0 || version == cert.Version2 {
t := &cert.TBSCertificate{
Version: cert.Version2,
Name: *sf.name,
Networks: append(v4Networks, v6Networks...),
Groups: groups,
UnsafeNetworks: append(v4UnsafeNetworks, v6UnsafeNetworks...),
NotBefore: notBefore,
NotAfter: notAfter,
PublicKey: pub,
IsCA: false,
Curve: curve,
}
if *sf.inPubPath == "" {

var nc cert.Certificate
if p11Client == nil {
nc, err = t.Sign(caCert, curve, caKey)
if err != nil {
return fmt.Errorf("error while signing: %w", err)
}
} else {
nc, err = t.SignWith(caCert, curve, p11Client.SignASN1)
if err != nil {
return fmt.Errorf("error while signing with PKCS#11: %w", err)
}
}

crts = append(crts, nc)
}

if !isP11 && *sf.inPubPath == "" {
if _, err := os.Stat(*sf.outKeyPath); err == nil {
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
}

err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKeyToPEM(curve, rawPriv), 0600)
err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
if err != nil {
return fmt.Errorf("error while writing out-key: %s", err)
}
}

var b []byte
for _, c := range crts {
sb, err := c.MarshalPEM()
if err != nil {
return fmt.Errorf("error while marshalling certificate: %s", err)
}
b = append(b, sb...)
b, err := nc.MarshalToPEM()
if err != nil {
return fmt.Errorf("error while marshalling certificate: %s", err)
}

err = os.WriteFile(*sf.outCertPath, b, 0600)
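Note: the newer sign.go above replaces the single -ip/-subnets flags with comma-separated -networks/-unsafe-networks values parsed via netip.ParsePrefix and bucketed by address family. The following is a minimal, self-contained sketch of that parsing pattern only; parseNetworks and the sample inputs are illustrative, not code from the repository.

// Sketch of the comma-separated network parsing shown in the newer sign.go above.
package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// parseNetworks splits a -networks style flag value and buckets the prefixes
// by address family, mirroring the v4Networks/v6Networks split in the diff.
func parseNetworks(raw string) (v4, v6 []netip.Prefix, err error) {
	for _, rs := range strings.Split(raw, ",") {
		rs = strings.TrimSpace(rs)
		if rs == "" {
			continue
		}
		n, perr := netip.ParsePrefix(rs)
		if perr != nil {
			return nil, nil, fmt.Errorf("invalid -networks definition: %s", rs)
		}
		if n.Addr().Is4() {
			v4 = append(v4, n)
		} else {
			v6 = append(v6, n)
		}
	}
	return v4, v6, nil
}

func main() {
	// Example inputs are made up; empty entries are skipped just like in the diff.
	v4, v6, err := parseNetworks("10.1.0.1/24, , fd00::1/64")
	fmt.Println(v4, v6, err)
}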
@@ -13,10 +13,11 @@ import (

"github.com/slackhq/nebula/cert"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ed25519"
)

//TODO: test file permissions

func Test_signSummary(t *testing.T) {
assert.Equal(t, "sign <flags>: create and sign a certificate", signSummary())
}
@@ -38,24 +39,17 @@ func Test_signHelp(t *testing.T) {
" -in-pub string\n"+
" \tOptional (if out-key not set): path to read a previously generated public key\n"+
" -ip string\n"+
" \tDeprecated, see -networks\n"+
" \tRequired: ipv4 address and network in CIDR notation to assign the cert\n"+
" -name string\n"+
" \tRequired: name of the cert, usually a hostname\n"+
" -networks string\n"+
" \tRequired: comma separated list of ip address and network in CIDR notation to assign to this cert\n"+
" -out-crt string\n"+
" \tOptional: path to write the certificate to\n"+
" -out-key string\n"+
" \tOptional (if in-pub not set): path to write the private key to\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
optionalPkcs11String(" -pkcs11 string\n \tOptional: PKCS#11 URI to an existing private key\n")+
" -subnets string\n"+
" \tDeprecated, see -unsafe-networks\n"+
" -unsafe-networks string\n"+
" \tOptional: comma separated list of ip address and network in CIDR notation. Unsafe networks this cert can route for\n"+
" -version uint\n"+
" \tOptional: version of the certificate format to use, the default is to create both v1 and v2 certificates.\n",
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for\n",
ob.String(),
)
}
@@ -82,20 +76,20 @@ func Test_signCert(t *testing.T) {

// required args
assertHelpError(t, signCert(
[]string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
), "-name is required")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

assertHelpError(t, signCert(
[]string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
), "-networks is required")
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
), "-ip is required")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// cannot set -in-pub and -out-key
assertHelpError(t, signCert(
[]string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
), "cannot set both -in-pub and -out-key")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
@@ -103,18 +97,18 @@ func Test_signCert(t *testing.T) {
// failed to read key
ob.Reset()
eb.Reset()
args := []string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError)
args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError)

// failed to unmarshal key
ob.Reset()
eb.Reset()
caKeyF, err := os.CreateTemp("", "sign-cert.key")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(caKeyF.Name())

args = []string{"-version", "1", "-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block")
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

@@ -122,11 +116,11 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()
caPub, caPriv, _ := ed25519.GenerateKey(rand.Reader)
caKeyF.Write(cert.MarshalSigningPrivateKeyToPEM(cert.Curve_CURVE25519, caPriv))
caKeyF.Write(cert.MarshalEd25519PrivateKey(caPriv))

// failed to read cert
args = []string{"-version", "1", "-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

@@ -134,22 +128,30 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()
caCrtF, err := os.CreateTemp("", "sign-cert.crt")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(caCrtF.Name())

args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// write a proper ca cert for later
ca, _ := NewTestCaCert("ca", caPub, caPriv, time.Now(), time.Now().Add(time.Minute*200), nil, nil, nil)
b, _ := ca.MarshalPEM()
ca := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "ca",
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Minute * 200),
PublicKey: caPub,
IsCA: true,
},
}
b, _ := ca.MarshalToPEM()
caCrtF.Write(b)

// failed to read pub
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError)
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

@@ -157,11 +159,11 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()
inPubF, err := os.CreateTemp("", "in.pub")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(inPubF.Name())

args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

@@ -169,124 +171,116 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()
inPub, _ := x25519Keypair()
inPubF.Write(cert.MarshalPublicKeyToPEM(cert.Curve_CURVE25519, inPub))
inPubF.Write(cert.MarshalX25519PublicKey(inPub))

// bad ip cidr
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -networks definition: a1.1.1.1/24")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -networks definition: v1 certificates can only have a single ipv4 address")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")

ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24,1.1.1.2/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -networks definition: v1 certificates can only have a single ipv4 address")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// bad subnet cidr
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -unsafe-networks definition: a")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: invalid CIDR address: a")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -unsafe-networks definition: v1 certificates can only be ipv4")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// mismatched ca key
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(caKeyF2.Name())
caKeyF2.Write(cert.MarshalSigningPrivateKeyToPEM(cert.Curve_CURVE25519, caPriv2))
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))

ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
require.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// failed key write
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// create temp key file
keyF, err := os.CreateTemp("", "test.key")
require.NoError(t, err)
assert.Nil(t, err)
os.Remove(keyF.Name())

// failed cert write
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
os.Remove(keyF.Name())

// create temp cert file
crtF, err := os.CreateTemp("", "test.crt")
require.NoError(t, err)
assert.Nil(t, err)
os.Remove(crtF.Name())

// test proper cert with removed empty groups and subnets
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.NoError(t, signCert(args, ob, eb, nopw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, nopw))
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// read cert and key files
rb, _ := os.ReadFile(keyF.Name())
lKey, b, curve, err := cert.UnmarshalPrivateKeyFromPEM(rb)
assert.Equal(t, cert.Curve_CURVE25519, curve)
assert.Empty(t, b)
require.NoError(t, err)
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
assert.Len(t, lKey, 32)

rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err := cert.UnmarshalCertificateFromPEM(rb)
assert.Empty(t, b)
require.NoError(t, err)
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)

assert.Equal(t, "test", lCrt.Name())
assert.Equal(t, "1.1.1.1/24", lCrt.Networks()[0].String())
assert.Len(t, lCrt.Networks(), 1)
assert.False(t, lCrt.IsCA())
assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Groups())
assert.Len(t, lCrt.UnsafeNetworks(), 3)
assert.Len(t, lCrt.PublicKey(), 32)
assert.Equal(t, time.Duration(time.Minute*100), lCrt.NotAfter().Sub(lCrt.NotBefore()))
assert.Equal(t, "test", lCrt.Details.Name)
assert.Equal(t, "1.1.1.1/24", lCrt.Details.Ips[0].String())
assert.Len(t, lCrt.Details.Ips, 1)
assert.False(t, lCrt.Details.IsCA)
assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Details.Groups)
assert.Len(t, lCrt.Details.Subnets, 3)
assert.Len(t, lCrt.Details.PublicKey, 32)
assert.Equal(t, time.Duration(time.Minute*100), lCrt.Details.NotAfter.Sub(lCrt.Details.NotBefore))

sns := []string{}
for _, sn := range lCrt.UnsafeNetworks() {
for _, sn := range lCrt.Details.Subnets {
sns = append(sns, sn.String())
}
assert.Equal(t, []string{"10.1.1.1/32", "10.2.2.2/32", "10.5.5.5/32"}, sns)

issuer, _ := ca.Fingerprint()
assert.Equal(t, issuer, lCrt.Issuer())
issuer, _ := ca.Sha256Sum()
assert.Equal(t, issuer, lCrt.Details.Issuer)

assert.True(t, lCrt.CheckSignature(caPub))

@@ -295,55 +289,53 @@ func Test_signCert(t *testing.T) {
os.Remove(crtF.Name())
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
require.NoError(t, signCert(args, ob, eb, nopw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
assert.Nil(t, signCert(args, ob, eb, nopw))
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// read cert file and check pub key matches in-pub
rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err = cert.UnmarshalCertificateFromPEM(rb)
assert.Empty(t, b)
require.NoError(t, err)
assert.Equal(t, lCrt.PublicKey(), inPub)
lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
assert.Equal(t, lCrt.Details.PublicKey, inPub)

// test refuse to sign cert with duration beyond root
ob.Reset()
eb.Reset()
os.Remove(keyF.Name())
os.Remove(crtF.Name())
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.EqualError(t, signCert(args, ob, eb, nopw), "error while signing: certificate expires after signing certificate")
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// create valid cert/key for overwrite tests
os.Remove(keyF.Name())
os.Remove(crtF.Name())
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.NoError(t, signCert(args, ob, eb, nopw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, nopw))

// test that we won't overwrite existing key file
os.Remove(crtF.Name())
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name())
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

// create valid cert/key for overwrite tests
os.Remove(keyF.Name())
os.Remove(crtF.Name())
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.NoError(t, signCert(args, ob, eb, nopw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, nopw))

// test that we won't overwrite existing certificate file
os.Remove(keyF.Name())
ob.Reset()
eb.Reset()
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name())
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name())
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())

@@ -356,11 +348,11 @@ func Test_signCert(t *testing.T) {
eb.Reset()

caKeyF, err = os.CreateTemp("", "sign-cert.key")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(caKeyF.Name())

caCrtF, err = os.CreateTemp("", "sign-cert.crt")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(caCrtF.Name())

// generate the encrypted key
@@ -369,13 +361,21 @@ func Test_signCert(t *testing.T) {
b, _ = cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, caPriv, passphrase, kdfParams)
caKeyF.Write(b)

ca, _ = NewTestCaCert("ca", caPub, caPriv, time.Now(), time.Now().Add(time.Minute*200), nil, nil, nil)
b, _ = ca.MarshalPEM()
ca = cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "ca",
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Minute * 200),
PublicKey: caPub,
IsCA: true,
},
}
b, _ = ca.MarshalToPEM()
caCrtF.Write(b)

// test with the proper password
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.NoError(t, signCert(args, ob, eb, testpw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Nil(t, signCert(args, ob, eb, testpw))
assert.Equal(t, "Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())

@@ -384,8 +384,8 @@ func Test_signCert(t *testing.T) {
eb.Reset()

testpw.password = []byte("invalid password")
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.Error(t, signCert(args, ob, eb, testpw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Error(t, signCert(args, ob, eb, testpw))
assert.Equal(t, "Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())

@@ -393,8 +393,8 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()

args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.Error(t, signCert(args, ob, eb, nopw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Error(t, signCert(args, ob, eb, nopw))
// normally the user hitting enter on the prompt would add newlines between these
assert.Equal(t, "Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())
@@ -403,8 +403,8 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()

args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
require.Error(t, signCert(args, ob, eb, errpw))
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
assert.Error(t, signCert(args, ob, eb, errpw))
assert.Equal(t, "Enter passphrase: ", ob.String())
assert.Empty(t, eb.String())
}
@@ -1,7 +1,6 @@
package main

import (
"errors"
"flag"
"fmt"
"io"
@@ -42,14 +41,14 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {

rawCACert, err := os.ReadFile(*vf.caPath)
if err != nil {
return fmt.Errorf("error while reading ca: %w", err)
return fmt.Errorf("error while reading ca: %s", err)
}

caPool := cert.NewCAPool()
for {
rawCACert, err = caPool.AddCAFromPEM(rawCACert)
rawCACert, err = caPool.AddCACertificate(rawCACert)
if err != nil {
return fmt.Errorf("error while adding ca cert to pool: %w", err)
return fmt.Errorf("error while adding ca cert to pool: %s", err)
}

if rawCACert == nil || len(rawCACert) == 0 || strings.TrimSpace(string(rawCACert)) == "" {
@@ -59,30 +58,20 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {

rawCert, err := os.ReadFile(*vf.certPath)
if err != nil {
return fmt.Errorf("unable to read crt: %w", err)
}
var errs []error
for {
if len(rawCert) == 0 {
break
}
c, extra, err := cert.UnmarshalCertificateFromPEM(rawCert)
if err != nil {
return fmt.Errorf("error while parsing crt: %w", err)
}
rawCert = extra
_, err = caPool.VerifyCertificate(time.Now(), c)
if err != nil {
switch {
case errors.Is(err, cert.ErrCaNotFound):
errs = append(errs, fmt.Errorf("error while verifying certificate v%d %s with issuer %s: %w", c.Version(), c.Name(), c.Issuer(), err))
default:
errs = append(errs, fmt.Errorf("error while verifying certificate %+v: %w", c, err))
}
}
return fmt.Errorf("unable to read crt; %s", err)
}

return errors.Join(errs...)
c, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
if err != nil {
return fmt.Errorf("error while parsing crt: %s", err)
}

good, err := c.Verify(time.Now(), caPool)
if !good {
return err
}

return nil
}

func verifySummary() string {
@@ -91,7 +80,7 @@ func verifySummary() string {

func verifyHelp(out io.Writer) {
vf := newVerifyFlags()
_, _ = out.Write([]byte("Usage of " + os.Args[0] + " " + verifySummary() + "\n"))
out.Write([]byte("Usage of " + os.Args[0] + " " + verifySummary() + "\n"))
vf.set.SetOutput(out)
vf.set.PrintDefaults()
}
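Note: the newer verify.go above checks every certificate found in the -crt file rather than only the first PEM block, collecting per-certificate failures with errors.Join. Below is a hedged, standalone sketch of that drain-all-PEM-blocks pattern; verifyAll is an illustrative wrapper name, and the cert.CAPool type is assumed from the cert.NewCAPool call shown in the diff.

// Sketch only: walk a PEM bundle and verify each certificate against a CA pool.
package main

import (
	"errors"
	"fmt"
	"os"
	"time"

	"github.com/slackhq/nebula/cert"
)

func verifyAll(caPool *cert.CAPool, certPath string) error {
	rawCert, err := os.ReadFile(certPath)
	if err != nil {
		return fmt.Errorf("unable to read crt: %w", err)
	}

	var errs []error
	for len(rawCert) > 0 {
		// UnmarshalCertificateFromPEM returns the parsed certificate plus the
		// remaining bytes, so the loop naturally walks a bundle of certs.
		c, extra, err := cert.UnmarshalCertificateFromPEM(rawCert)
		if err != nil {
			return fmt.Errorf("error while parsing crt: %w", err)
		}
		rawCert = extra

		if _, err := caPool.VerifyCertificate(time.Now(), c); err != nil {
			errs = append(errs, fmt.Errorf("error while verifying certificate %s: %w", c.Name(), err))
		}
	}
	// errors.Join returns nil when no errors were collected.
	return errors.Join(errs...)
}

func main() {
	// Illustrative only; a real caller would populate the pool with
	// cert.NewCAPool and AddCAFromPEM as shown in the diff above.
	fmt.Println(verifyAll(cert.NewCAPool(), "host.crt"))
}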
@@ -9,7 +9,6 @@ import (

"github.com/slackhq/nebula/cert"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ed25519"
)

@@ -38,87 +37,105 @@ func Test_verify(t *testing.T) {

// required args
assertHelpError(t, verify([]string{"-ca", "derp"}, ob, eb), "-crt is required")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())

assertHelpError(t, verify([]string{"-crt", "derp"}, ob, eb), "-ca is required")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())

// no ca at path
ob.Reset()
eb.Reset()
err := verify([]string{"-ca", "does_not_exist", "-crt", "does_not_exist"}, ob, eb)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
require.EqualError(t, err, "error while reading ca: open does_not_exist: "+NoSuchFileError)
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
assert.EqualError(t, err, "error while reading ca: open does_not_exist: "+NoSuchFileError)

// invalid ca at path
ob.Reset()
eb.Reset()
caFile, err := os.CreateTemp("", "verify-ca")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(caFile.Name())

caFile.WriteString("-----BEGIN NOPE-----")
err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
require.EqualError(t, err, "error while adding ca cert to pool: input did not contain a valid PEM encoded block")
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
assert.EqualError(t, err, "error while adding ca cert to pool: input did not contain a valid PEM encoded block")

// make a ca for later
caPub, caPriv, _ := ed25519.GenerateKey(rand.Reader)
ca, _ := NewTestCaCert("test-ca", caPub, caPriv, time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour*2), nil, nil, nil)
b, _ := ca.MarshalPEM()
ca := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test-ca",
NotBefore: time.Now().Add(time.Hour * -1),
NotAfter: time.Now().Add(time.Hour * 2),
PublicKey: caPub,
IsCA: true,
},
}
ca.Sign(cert.Curve_CURVE25519, caPriv)
b, _ := ca.MarshalToPEM()
caFile.Truncate(0)
caFile.Seek(0, 0)
caFile.Write(b)

// no crt at path
err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
require.EqualError(t, err, "unable to read crt: open does_not_exist: "+NoSuchFileError)
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
assert.EqualError(t, err, "unable to read crt; open does_not_exist: "+NoSuchFileError)

// invalid crt at path
ob.Reset()
eb.Reset()
certFile, err := os.CreateTemp("", "verify-cert")
require.NoError(t, err)
assert.Nil(t, err)
defer os.Remove(certFile.Name())

certFile.WriteString("-----BEGIN NOPE-----")
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
require.EqualError(t, err, "error while parsing crt: input did not contain a valid PEM encoded block")
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
assert.EqualError(t, err, "error while parsing crt: input did not contain a valid PEM encoded block")

// unverifiable cert at path
crt, _ := NewTestCert(ca, caPriv, "test-cert", time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour), nil, nil, nil)
// Slightly evil hack to modify the certificate after it was sealed to generate an invalid signature
pub := crt.PublicKey()
for i, _ := range pub {
pub[i] = 0
_, badPriv, _ := ed25519.GenerateKey(rand.Reader)
certPub, _ := x25519Keypair()
signer, _ := ca.Sha256Sum()
crt := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test-cert",
NotBefore: time.Now().Add(time.Hour * -1),
NotAfter: time.Now().Add(time.Hour),
PublicKey: certPub,
IsCA: false,
Issuer: signer,
},
}
b, _ = crt.MarshalPEM()
crt.Sign(cert.Curve_CURVE25519, badPriv)
b, _ = crt.MarshalToPEM()
certFile.Truncate(0)
certFile.Seek(0, 0)
certFile.Write(b)

err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
require.ErrorIs(t, err, cert.ErrSignatureMismatch)
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
assert.EqualError(t, err, "certificate signature did not match")

// verified cert at path
crt, _ = NewTestCert(ca, caPriv, "test-cert", time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour), nil, nil, nil)
b, _ = crt.MarshalPEM()
crt.Sign(cert.Curve_CURVE25519, caPriv)
b, _ = crt.MarshalToPEM()
certFile.Truncate(0)
certFile.Seek(0, 0)
certFile.Write(b)

err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
require.NoError(t, err)
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
assert.Nil(t, err)
}
@ -17,14 +17,14 @@ import (

"dario.cat/mergo"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
)

type C struct {
path string
files []string
Settings map[string]any
Settings map[interface{}]interface{}
oldSettings map[string]any
oldSettings map[interface{}]interface{}
callbacks []func(*C)
l *logrus.Logger
reloadLock sync.Mutex
@ -32,7 +32,7 @@ type C struct {

func NewC(l *logrus.Logger) *C {
return &C{
Settings: make(map[string]any),
Settings: make(map[interface{}]interface{}),
l: l,
}
}
@ -92,8 +92,8 @@ func (c *C) HasChanged(k string) bool {
}

var (
nv any
nv interface{}
ov any
ov interface{}
)

if k == "" {
@ -147,7 +147,7 @@ func (c *C) ReloadConfig() {
c.reloadLock.Lock()
defer c.reloadLock.Unlock()

c.oldSettings = make(map[string]any)
c.oldSettings = make(map[interface{}]interface{})
for k, v := range c.Settings {
c.oldSettings[k] = v
}
@ -167,7 +167,7 @@ func (c *C) ReloadConfigString(raw string) error {
c.reloadLock.Lock()
defer c.reloadLock.Unlock()

c.oldSettings = make(map[string]any)
c.oldSettings = make(map[interface{}]interface{})
for k, v := range c.Settings {
c.oldSettings[k] = v
}
@ -201,7 +201,7 @@ func (c *C) GetStringSlice(k string, d []string) []string {
return d
}

rv, ok := r.([]any)
rv, ok := r.([]interface{})
if !ok {
return d
}
@ -215,13 +215,13 @@ func (c *C) GetStringSlice(k string, d []string) []string {
}

// GetMap will get the map for k or return the default d if not found or invalid
func (c *C) GetMap(k string, d map[string]any) map[string]any {
func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} {
r := c.Get(k)
if r == nil {
return d
}

v, ok := r.(map[string]any)
v, ok := r.(map[interface{}]interface{})
if !ok {
return d
}
@ -243,7 +243,7 @@ func (c *C) GetInt(k string, d int) int {
// GetUint32 will get the uint32 for k or return the default d if not found or invalid
func (c *C) GetUint32(k string, d uint32) uint32 {
r := c.GetInt(k, int(d))
if r < 0 || uint64(r) > uint64(math.MaxUint32) {
if uint64(r) > uint64(math.MaxUint32) {
return d
}
return uint32(r)
@ -266,22 +266,6 @@ func (c *C) GetBool(k string, d bool) bool {
return v
}

func AsBool(v any) (value bool, ok bool) {
switch x := v.(type) {
case bool:
return x, true
case string:
switch x {
case "y", "yes":
return true, true
case "n", "no":
return false, true
}
}

return false, false
}

// GetDuration will get the duration for k or return the default d if not found or invalid
func (c *C) GetDuration(k string, d time.Duration) time.Duration {
r := c.GetString(k, "")
@ -292,7 +276,7 @@ func (c *C) GetDuration(k string, d time.Duration) time.Duration {
return v
}

func (c *C) Get(k string) any {
func (c *C) Get(k string) interface{} {
return c.get(k, c.Settings)
}

@ -300,10 +284,10 @@ func (c *C) IsSet(k string) bool {
return c.get(k, c.Settings) != nil
}

func (c *C) get(k string, v any) any {
func (c *C) get(k string, v interface{}) interface{} {
parts := strings.Split(k, ".")
for _, p := range parts {
m, ok := v.(map[string]any)
m, ok := v.(map[interface{}]interface{})
if !ok {
return nil
}
@ -362,7 +346,7 @@ func (c *C) addFile(path string, direct bool) error {
}

func (c *C) parseRaw(b []byte) error {
var m map[string]any
var m map[interface{}]interface{}

err := yaml.Unmarshal(b, &m)
if err != nil {
@ -374,7 +358,7 @@ func (c *C) parseRaw(b []byte) error {
}

func (c *C) parse() error {
var m map[string]any
var m map[interface{}]interface{}

for _, path := range c.files {
b, err := os.ReadFile(path)
@ -382,7 +366,7 @@ func (c *C) parse() error {
return err
}

var nm map[string]any
var nm map[interface{}]interface{}
err = yaml.Unmarshal(b, &nm)
if err != nil {
return err
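An aside on the config change above: one side of this diff stores settings as yaml.v2's map[interface{}]interface{}, the other as yaml.v3's map[string]any, but the dot-separated key walk in get() works the same way in both. The sketch below is a standalone illustration of that lookup, not the repository's code; the getPath helper name is made up.

package main

import (
	"fmt"
	"strings"
)

// getPath walks a nested map[string]any with a dot-separated key,
// mirroring the lookup that config.Get performs in the diff above.
func getPath(settings map[string]any, key string) any {
	var v any = settings
	for _, p := range strings.Split(key, ".") {
		m, ok := v.(map[string]any)
		if !ok {
			return nil
		}
		v = m[p]
	}
	return v
}

func main() {
	settings := map[string]any{
		"firewall": map[string]any{"outbound": "hi"},
	}
	fmt.Println(getPath(settings, "firewall.outbound")) // hi
	fmt.Println(getPath(settings, "firewall.missing"))  // <nil>
}

With string keys every level is a plain map[string]any assertion; the interface{}-keyed variant needs the same walk but with map[interface{}]interface{} assertions.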
@ -10,7 +10,7 @@ import (
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
)

func TestConfig_Load(t *testing.T) {
@ -19,37 +19,40 @@ func TestConfig_Load(t *testing.T) {
// invalid yaml
c := NewC(l)
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
require.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[string]interface {}")
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")

// simple multi config merge
c = NewC(l)
os.RemoveAll(dir)
os.Mkdir(dir, 0755)

require.NoError(t, err)
assert.Nil(t, err)

os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
require.NoError(t, c.Load(dir))
assert.Nil(t, c.Load(dir))
expected := map[string]any{
expected := map[interface{}]interface{}{
"outer": map[string]any{
"outer": map[interface{}]interface{}{
"inner": "override",
},
"new": "hi",
}
assert.Equal(t, expected, c.Settings)

//TODO: test symlinked file
//TODO: test symlinked directory
}

func TestConfig_Get(t *testing.T) {
l := test.NewLogger()
// test simple type
c := NewC(l)
c.Settings["firewall"] = map[string]any{"outbound": "hi"}
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"}
assert.Equal(t, "hi", c.Get("firewall.outbound"))

// test complex type
inner := []map[string]any{{"port": "1", "code": "2"}}
inner := []map[interface{}]interface{}{{"port": "1", "code": "2"}}
c.Settings["firewall"] = map[string]any{"outbound": inner}
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": inner}
assert.EqualValues(t, inner, c.Get("firewall.outbound"))

// test missing
@ -59,7 +62,7 @@ func TestConfig_Get(t *testing.T) {
func TestConfig_GetStringSlice(t *testing.T) {
l := test.NewLogger()
c := NewC(l)
c.Settings["slice"] = []any{"one", "two"}
c.Settings["slice"] = []interface{}{"one", "two"}
assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{}))
}

@ -67,28 +70,28 @@ func TestConfig_GetBool(t *testing.T) {
l := test.NewLogger()
c := NewC(l)
c.Settings["bool"] = true
assert.True(t, c.GetBool("bool", false))
assert.Equal(t, true, c.GetBool("bool", false))

c.Settings["bool"] = "true"
assert.True(t, c.GetBool("bool", false))
assert.Equal(t, true, c.GetBool("bool", false))

c.Settings["bool"] = false
assert.False(t, c.GetBool("bool", true))
assert.Equal(t, false, c.GetBool("bool", true))

c.Settings["bool"] = "false"
assert.False(t, c.GetBool("bool", true))
assert.Equal(t, false, c.GetBool("bool", true))

c.Settings["bool"] = "Y"
assert.True(t, c.GetBool("bool", false))
assert.Equal(t, true, c.GetBool("bool", false))

c.Settings["bool"] = "yEs"
assert.True(t, c.GetBool("bool", false))
assert.Equal(t, true, c.GetBool("bool", false))

c.Settings["bool"] = "N"
assert.False(t, c.GetBool("bool", true))
assert.Equal(t, false, c.GetBool("bool", true))

c.Settings["bool"] = "nO"
assert.False(t, c.GetBool("bool", true))
assert.Equal(t, false, c.GetBool("bool", true))
}

func TestConfig_HasChanged(t *testing.T) {
@ -101,14 +104,14 @@ func TestConfig_HasChanged(t *testing.T) {
// Test key change
c = NewC(l)
c.Settings["test"] = "hi"
c.oldSettings = map[string]any{"test": "no"}
c.oldSettings = map[interface{}]interface{}{"test": "no"}
assert.True(t, c.HasChanged("test"))
assert.True(t, c.HasChanged(""))

// No key change
c = NewC(l)
c.Settings["test"] = "hi"
c.oldSettings = map[string]any{"test": "hi"}
c.oldSettings = map[interface{}]interface{}{"test": "hi"}
assert.False(t, c.HasChanged("test"))
assert.False(t, c.HasChanged(""))
}
@ -117,11 +120,11 @@ func TestConfig_ReloadConfig(t *testing.T) {
l := test.NewLogger()
done := make(chan bool, 1)
dir, err := os.MkdirTemp("", "config-test")
require.NoError(t, err)
assert.Nil(t, err)
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)

c := NewC(l)
require.NoError(t, c.Load(dir))
assert.Nil(t, c.Load(dir))

assert.False(t, c.HasChanged("outer.inner"))
assert.False(t, c.HasChanged("outer"))
@ -184,11 +187,11 @@ firewall:
`),
}

var m map[string]any
var m map[any]any

// merge the same way config.parse() merges
for _, b := range configs {
var nm map[string]any
var nm map[any]any
err := yaml.Unmarshal(b, &nm)
require.NoError(t, err)

@ -205,15 +208,15 @@ firewall:
t.Logf("Merged Config as YAML:\n%s", mYaml)

// If a bug is present, some items might be replaced instead of merged like we expect
expected := map[string]any{
expected := map[any]any{
"firewall": map[string]any{
"firewall": map[any]any{
"inbound": []any{
map[string]any{"host": "any", "port": "any", "proto": "icmp"},
map[any]any{"host": "any", "port": "any", "proto": "icmp"},
map[string]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
map[string]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
"outbound": []any{
map[string]any{"host": "any", "port": "any", "proto": "any"}}},
map[any]any{"host": "any", "port": "any", "proto": "any"}}},
"listen": map[string]any{
"listen": map[any]any{
"host": "0.0.0.0",
"port": 4242,
},
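An aside on the merge test above: it loads several YAML fragments and expects nested maps to be combined key by key rather than replaced wholesale (the repository does the actual merging with the mergo library imported in the config file earlier in this diff). The sketch below only illustrates that map-overlay behavior under that assumption; the merge helper is made up, and it does not implement the append semantics the test also expects for slice values such as the firewall rule lists.

package main

import "fmt"

// merge overlays src onto dst: nested maps are merged recursively and
// scalar values from the later file win. Illustration only, not the
// repository's implementation.
func merge(dst, src map[string]any) {
	for k, sv := range src {
		if sm, ok := sv.(map[string]any); ok {
			if dm, ok := dst[k].(map[string]any); ok {
				merge(dm, sm)
				continue
			}
		}
		dst[k] = sv
	}
}

func main() {
	a := map[string]any{"outer": map[string]any{"inner": "hi"}}
	b := map[string]any{"outer": map[string]any{"inner": "override"}, "new": "hi"}
	merge(a, b)
	fmt.Println(a) // map[new:hi outer:map[inner:override]]
}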
@ -180,7 +180,7 @@ func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte
case deleteTunnel:
if cm.hostMap.DeleteHostInfo(hostinfo) {
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
cm.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
cm.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
}

case closeTunnel:
@ -218,7 +218,7 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
relayFor := oldhostinfo.relayState.CopyAllRelayFor()

for _, r := range relayFor {
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerAddr)
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)

var index uint32
var relayFrom netip.Addr
@ -230,15 +230,15 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
// This relay already exists in newhostinfo, then do nothing.
continue
case Requested:
// The relay exists in a Requested state; re-send the request
// The relayed connection exists in a Requested state; re-send the request
index = existing.LocalIndex
switch r.Type {
case TerminalType:
relayFrom = cm.intf.myVpnAddrs[0]
relayFrom = cm.intf.myVpnNet.Addr()
relayTo = existing.PeerAddr
relayTo = existing.PeerIp
case ForwardingType:
relayFrom = existing.PeerAddr
relayFrom = existing.PeerIp
relayTo = newhostinfo.vpnAddrs[0]
relayTo = newhostinfo.vpnIp
default:
// should never happen
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
@ -254,65 +254,46 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
cm.relayUsedLock.RUnlock()
// The relay doesn't exist at all; create some relay state and send the request.
var err error
index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerAddr, nil, r.Type, Requested)
index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerIp, nil, r.Type, Requested)
if err != nil {
cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
continue
}
switch r.Type {
case TerminalType:
relayFrom = cm.intf.myVpnAddrs[0]
relayFrom = cm.intf.myVpnNet.Addr()
relayTo = r.PeerAddr
relayTo = r.PeerIp
case ForwardingType:
relayFrom = r.PeerAddr
relayFrom = r.PeerIp
relayTo = newhostinfo.vpnAddrs[0]
relayTo = newhostinfo.vpnIp
default:
// should never happen
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
}
}

//TODO: IPV6-WORK
relayFromB := relayFrom.As4()
relayToB := relayTo.As4()

// Send a CreateRelayRequest to the peer.
req := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: index,
RelayFromIp: binary.BigEndian.Uint32(relayFromB[:]),
RelayToIp: binary.BigEndian.Uint32(relayToB[:]),
}

switch newhostinfo.GetCert().Certificate.Version() {
case cert.Version1:
if !relayFrom.Is4() {
cm.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
continue
}

if !relayTo.Is4() {
cm.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
continue
}

b := relayFrom.As4()
req.OldRelayFromAddr = binary.BigEndian.Uint32(b[:])
b = relayTo.As4()
req.OldRelayToAddr = binary.BigEndian.Uint32(b[:])
case cert.Version2:
req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
req.RelayToAddr = netAddrToProtoAddr(relayTo)
default:
newhostinfo.logger(cm.l).Error("Unknown certificate version found while attempting to migrate relay")
continue
}

msg, err := req.Marshal()
if err != nil {
cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
} else {
cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
cm.l.WithFields(logrus.Fields{
"relayFrom": req.RelayFromAddr,
"relayFrom": req.RelayFromIp,
"relayTo": req.RelayToAddr,
"relayTo": req.RelayToIp,
"initiatorRelayIndex": req.InitiatorRelayIndex,
"responderRelayIndex": req.ResponderRelayIndex,
"vpnAddrs": newhostinfo.vpnAddrs}).
"vpnIp": newhostinfo.vpnIp}).
Info("send CreateRelayRequest")
}
}
@ -333,7 +314,7 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Tim
return closeTunnel, hostinfo, nil
}

primary := cm.hostMap.Hosts[hostinfo.vpnAddrs[0]]
primary := cm.hostMap.Hosts[hostinfo.vpnIp]
mainHostInfo := true
if primary != nil && primary != hostinfo {
mainHostInfo = false
@ -452,24 +433,21 @@ func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
// Let's sort this out.

// Only one side should swap because if both swap then we may never resolve to a single tunnel.
if current.vpnIp.Compare(cm.intf.myVpnNet.Addr()) < 0 {
// vpn addr is static across all tunnels for this host pair so lets
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
// use that to determine if we should consider swapping.
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
if current.vpnAddrs[0].Compare(cm.intf.myVpnAddrs[0]) < 0 {
// The remotes vpn ip is lower than mine. I will not flip.
// Their primary vpn addr is less than mine. Do not swap.
return false
}

crt := cm.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
certState := cm.intf.pki.GetCertState()
// If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
// settle down.
return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
}

func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
cm.hostMap.Lock()
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
if cm.hostMap.Hosts[current.vpnAddrs[0]] == primary {
if cm.hostMap.Hosts[current.vpnIp] == primary {
cm.hostMap.unlockedMakePrimary(current)
}
cm.hostMap.Unlock()
@ -484,9 +462,8 @@ func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostI
return false
}

caPool := cm.intf.pki.GetCAPool()
valid, err := remoteCert.VerifyWithCache(now, cm.intf.pki.GetCAPool())
err := caPool.VerifyCachedCertificate(now, remoteCert)
if valid {
if err == nil {
return false
}

@ -495,8 +472,9 @@ func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostI
return false
}

fingerprint, _ := remoteCert.Sha256Sum()
hostinfo.logger(cm.l).WithError(err).
WithField("fingerprint", remoteCert.Fingerprint).
WithField("fingerprint", fingerprint).
Info("Remote certificate is no longer valid, tearing down the tunnel")

return true
@ -508,7 +486,7 @@ func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
return
}

if cm.intf.lightHouse.IsAnyLighthouseAddr(hostinfo.vpnAddrs) {
if cm.intf.lightHouse.IsLighthouseIP(hostinfo.vpnIp) {
// Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
// would lose the ability to notify us and punchy.respond would become unreliable.
@ -528,17 +506,14 @@ func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
}

func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
cs := cm.intf.pki.getCertState()
certState := cm.intf.pki.GetCertState()
curCrt := hostinfo.ConnectionState.myCert
if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
myCrt := cs.getCertificate(curCrt.Version())
if curCrt.Version() >= cs.initiatingVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true {
// The current tunnel is using the latest certificate and version, no need to rehandshake.
return
}

cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
cm.l.WithField("vpnIp", hostinfo.vpnIp).
WithField("reason", "local certificate is not current").
Info("Re-handshaking with remote")

cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
cm.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
}
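An aside on the shouldSwapPrimary change above: both versions rely on the same tie-break, comparing the remote's vpn address against the local one so that exactly one side of a host pair ever decides to swap its primary tunnel. The sketch below is an illustration of that comparison only (the function name and addresses are invented), not the repository's code.

package main

import (
	"fmt"
	"net/netip"
)

// shouldConsiderSwap mirrors the tie-break described in the comments above:
// the vpn address is the same for every tunnel in a host pair, so comparing
// it against our own address lets exactly one peer decide to swap, which
// prevents both sides from flipping primary forever.
func shouldConsiderSwap(remote, mine netip.Addr) bool {
	// If the remote's address sorts below ours, we never swap; the other
	// side will be the one to act.
	return remote.Compare(mine) >= 0
}

func main() {
	mine := netip.MustParseAddr("10.0.0.1")
	remote := netip.MustParseAddr("10.0.0.2")
	fmt.Println(shouldConsiderSwap(remote, mine)) // true: this side may swap
	fmt.Println(shouldConsiderSwap(mine, remote)) // false: the other side stays put
}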
@ -3,6 +3,7 @@ package nebula
import (
"crypto/ed25519"
"crypto/rand"
"net"
"net/netip"
"testing"
"time"
@ -13,7 +14,6 @@ import (
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func newTestLighthouse() *LightHouse {
@ -34,19 +34,20 @@ func newTestLighthouse() *LightHouse {
func Test_NewConnectionManagerTest(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []netip.Prefix{localrange}

// Very incomplete mock objects
hostMap := newHostMap(l)
hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)

cs := &CertState{
initiatingVersion: cert.Version1,
RawCertificate: []byte{},
privateKey: []byte{},
PrivateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
Certificate: &cert.NebulaCertificate{},
v1HandshakeBytes: []byte{},
RawCertificateNoKey: []byte{},
}

lh := newTestLighthouse()
@ -73,12 +74,12 @@ func Test_NewConnectionManagerTest(t *testing.T) {

// Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{
vpnAddrs: []netip.Addr{vpnIp},
vpnIp: vpnIp,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
myCert: &dummyCert{version: cert.Version1},
myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
@ -87,7 +88,7 @@ func Test_NewConnectionManagerTest(t *testing.T) {
nc.Out(hostinfo)
nc.In(hostinfo)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.True(t, hostinfo.out.Load())
assert.True(t, hostinfo.in.Load())
@ -106,30 +107,31 @@ func Test_NewConnectionManagerTest(t *testing.T) {
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

// Do a final traffic check tick, the host should now be removed
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs)
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
}

func Test_NewConnectionManagerTest2(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []netip.Prefix{localrange}

// Very incomplete mock objects
hostMap := newHostMap(l)
hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)

cs := &CertState{
initiatingVersion: cert.Version1,
RawCertificate: []byte{},
privateKey: []byte{},
PrivateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
Certificate: &cert.NebulaCertificate{},
v1HandshakeBytes: []byte{},
RawCertificateNoKey: []byte{},
}

lh := newTestLighthouse()
@ -156,12 +158,12 @@ func Test_NewConnectionManagerTest2(t *testing.T) {

// Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{
vpnAddrs: []netip.Addr{vpnIp},
vpnIp: vpnIp,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
myCert: &dummyCert{version: cert.Version1},
myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
@ -172,7 +174,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
assert.True(t, hostinfo.in.Load())
assert.True(t, hostinfo.out.Load())
assert.False(t, hostinfo.pendingDeletion.Load())
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
@ -188,7 +190,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

// We saw traffic, should no longer be pending deletion
nc.In(hostinfo)
@ -197,24 +199,25 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}

func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
l := test.NewLogger()
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnAddrs := []netip.Addr{netip.MustParseAddr("172.1.1.2")}
vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []netip.Prefix{localrange}

// Very incomplete mock objects
hostMap := newHostMap(l)
hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)

cs := &CertState{
initiatingVersion: cert.Version1,
RawCertificate: []byte{},
privateKey: []byte{},
PrivateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
Certificate: &cert.NebulaCertificate{},
v1HandshakeBytes: []byte{},
RawCertificateNoKey: []byte{},
}

lh := newTestLighthouse()
@ -232,7 +235,7 @@ func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {

// Create manager
conf := config.NewC(l)
conf.Settings["tunnels"] = map[string]any{
conf.Settings["tunnels"] = map[interface{}]interface{}{
"drop_inactive": true,
}
punchy := NewPunchyFromConfig(l, conf)
@ -242,12 +245,12 @@ func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {

// Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{
vpnAddrs: vpnAddrs,
vpnIp: vpnIp,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
myCert: &dummyCert{version: cert.Version1},
myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
@ -281,7 +284,7 @@ func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

// Finally advance beyond the inactivity timeout
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
@ -291,7 +294,7 @@ func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}

// Check if we can disconnect the peer.
@ -300,48 +303,55 @@ func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
now := time.Now()
l := test.NewLogger()
ipNet := net.IPNet{
IP: net.IPv4(172, 1, 1, 2),
Mask: net.IPMask{255, 255, 255, 0},
}
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []netip.Prefix{localrange}
hostMap := newHostMap(l)
hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)

// Generate keys for CA and peer's cert.
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
tbs := &cert.TBSCertificate{
caCert := cert.NebulaCertificate{
Version: 1,
Details: cert.NebulaCertificateDetails{
Name: "ca",
IsCA: true,
NotBefore: now,
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
NotAfter: now.Add(1 * time.Hour),
IsCA: true,
PublicKey: pubCA,
},
}

caCert, err := tbs.Sign(nil, cert.Curve_CURVE25519, privCA)
assert.NoError(t, caCert.Sign(cert.Curve_CURVE25519, privCA))
require.NoError(t, err)
ncp := &cert.NebulaCAPool{
ncp := cert.NewCAPool()
CAs: cert.NewCAPool().CAs,
require.NoError(t, ncp.AddCA(caCert))
}
ncp.CAs["ca"] = &caCert

pubCrt, _, _ := ed25519.GenerateKey(rand.Reader)
tbs = &cert.TBSCertificate{
peerCert := cert.NebulaCertificate{
Version: 1,
Details: cert.NebulaCertificateDetails{
Name: "host",
Networks: []netip.Prefix{vpncidr},
Ips: []*net.IPNet{&ipNet},
NotBefore: now,
Subnets: []*net.IPNet{},
NotAfter: now.Add(60 * time.Second),
NotBefore: now,
PublicKey: pubCrt,
NotAfter: now.Add(60 * time.Second),
PublicKey: pubCrt,
IsCA: false,
Issuer: "ca",
},
}
peerCert, err := tbs.Sign(caCert, cert.Curve_CURVE25519, privCA)
assert.NoError(t, peerCert.Sign(cert.Curve_CURVE25519, privCA))
require.NoError(t, err)

cachedPeerCert, err := ncp.VerifyCertificate(now.Add(time.Second), peerCert)

cs := &CertState{
privateKey: []byte{},
RawCertificate: []byte{},
v1Cert: &dummyCert{},
PrivateKey: []byte{},
v1HandshakeBytes: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}

lh := newTestLighthouse()
@ -367,10 +377,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
ifce.connectionManager = nc

hostinfo := &HostInfo{
vpnAddrs: []netip.Addr{vpnIp},
vpnIp: vpnIp,
ConnectionState: &ConnectionState{
myCert: &dummyCert{},
myCert: &cert.NebulaCertificate{},
peerCert: cachedPeerCert,
peerCert: &peerCert,
H: &noise.HandshakeState{},
},
}
@ -390,114 +400,3 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
invalid = nc.isInvalidCertificate(nextTick, hostinfo)
assert.True(t, invalid)
}

type dummyCert struct {
version cert.Version
curve cert.Curve
groups []string
isCa bool
issuer string
name string
networks []netip.Prefix
notAfter time.Time
notBefore time.Time
publicKey []byte
signature []byte
unsafeNetworks []netip.Prefix
}

func (d *dummyCert) Version() cert.Version {
return d.version
}

func (d *dummyCert) Curve() cert.Curve {
return d.curve
}

func (d *dummyCert) Groups() []string {
return d.groups
}

func (d *dummyCert) IsCA() bool {
return d.isCa
}

func (d *dummyCert) Issuer() string {
return d.issuer
}

func (d *dummyCert) Name() string {
return d.name
}

func (d *dummyCert) Networks() []netip.Prefix {
return d.networks
}

func (d *dummyCert) NotAfter() time.Time {
return d.notAfter
}

func (d *dummyCert) NotBefore() time.Time {
return d.notBefore
}

func (d *dummyCert) PublicKey() []byte {
return d.publicKey
}

func (d *dummyCert) Signature() []byte {
return d.signature
}

func (d *dummyCert) UnsafeNetworks() []netip.Prefix {
return d.unsafeNetworks
}

func (d *dummyCert) MarshalForHandshakes() ([]byte, error) {
return nil, nil
}

func (d *dummyCert) Sign(curve cert.Curve, key []byte) error {
return nil
}

func (d *dummyCert) CheckSignature(key []byte) bool {
return true
}

func (d *dummyCert) Expired(t time.Time) bool {
return false
}

func (d *dummyCert) CheckRootConstraints(signer cert.Certificate) error {
return nil
}

func (d *dummyCert) VerifyPrivateKey(curve cert.Curve, key []byte) error {
return nil
}

func (d *dummyCert) String() string {
return ""
}

func (d *dummyCert) Marshal() ([]byte, error) {
return nil, nil
}

func (d *dummyCert) MarshalPEM() ([]byte, error) {
return nil, nil
}

func (d *dummyCert) Fingerprint() (string, error) {
return "", nil
}

func (d *dummyCert) MarshalJSON() ([]byte, error) {
return nil, nil
}

func (d *dummyCert) Copy() cert.Certificate {
return d
}
@ -3,7 +3,6 @@ package nebula
import (
"crypto/rand"
"encoding/json"
"fmt"
"sync"
"sync/atomic"

@ -19,54 +18,50 @@ type ConnectionState struct {
eKey *NebulaCipherState
dKey *NebulaCipherState
H *noise.HandshakeState
myCert cert.Certificate
myCert *cert.NebulaCertificate
peerCert *cert.CachedCertificate
peerCert *cert.NebulaCertificate
initiator bool
messageCounter atomic.Uint64
window *Bits
writeLock sync.Mutex
}

func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, initiator bool, pattern noise.HandshakePattern) (*ConnectionState, error) {
func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
var dhFunc noise.DHFunc
switch crt.Curve() {
switch certState.Certificate.Details.Curve {
case cert.Curve_CURVE25519:
dhFunc = noise.DH25519
case cert.Curve_P256:
if cs.pkcs11Backed {
dhFunc = noiseutil.DHP256
dhFunc = noiseutil.DHP256PKCS11
} else {
dhFunc = noiseutil.DHP256
}
default:
return nil, fmt.Errorf("invalid curve: %s", crt.Curve())
l.Errorf("invalid curve: %s", certState.Certificate.Details.Curve)
return nil
}

var ncs noise.CipherSuite
var cs noise.CipherSuite
if cs.cipher == "chachapoly" {
if cipher == "chachapoly" {
ncs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
} else {
ncs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
cs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
}

static := noise.DHKey{Private: cs.privateKey, Public: crt.PublicKey()}
static := noise.DHKey{Private: certState.PrivateKey, Public: certState.PublicKey}

b := NewBits(ReplayWindow)
// Clear out bit 0, we never transmit it, and we don't want it showing as packet loss
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss
b.Update(l, 0)

hs, err := noise.NewHandshakeState(noise.Config{
CipherSuite: ncs,
CipherSuite: cs,
Random: rand.Reader,
Pattern: pattern,
Initiator: initiator,
StaticKeypair: static,
//NOTE: These should come from CertState (pki.go) when we finally implement it
PresharedKey: psk,
PresharedKey: []byte{},
PresharedKeyPlacement: pskStage,
PresharedKeyPlacement: 0,
})
if err != nil {
return nil, fmt.Errorf("NewConnectionState: %s", err)
return nil
}

// The queue and ready params prevent a counter race that would happen when
@ -75,12 +70,12 @@ func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, i
H: hs,
initiator: initiator,
window: b,
myCert: crt,
myCert: certState.Certificate,
}
// always start the counter from 2, as packet 1 and packet 2 are handshake packets.
ci.messageCounter.Add(2)

return ci, nil
return ci
}

func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
@ -90,7 +85,3 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
"message_counter": cs.messageCounter.Load(),
})
}

func (cs *ConnectionState) Curve() cert.Curve {
return cs.myCert.Curve()
}
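An aside on NewConnectionState above: both versions pick a Noise DH function from the certificate's key type before building the cipher suite. The sketch below illustrates that selection using only the flynn/noise package; the curve enum and dhForCurve helper are invented for the example, and the P-256 branch is left as a placeholder because the repository's noiseutil helpers (DHP256, and the PKCS#11-backed variant for HSM-held keys) are not reproduced here.

package main

import (
	"fmt"

	"github.com/flynn/noise"
)

// curve stands in for the two key types that appear in the diff above.
type curve int

const (
	curve25519 curve = iota
	curveP256
)

// dhForCurve maps a key type to a Noise DH function. Curve25519 keys can
// use the stock DH25519 implementation; P-256 keys need a custom DH
// function, so that branch only reports the requirement here.
func dhForCurve(c curve) (noise.DHFunc, error) {
	switch c {
	case curve25519:
		return noise.DH25519, nil
	case curveP256:
		return nil, fmt.Errorf("P-256 requires a custom DH function (noiseutil in the diff above)")
	default:
		return nil, fmt.Errorf("invalid curve: %d", c)
	}
}

func main() {
	dh, err := dhForCurve(curve25519)
	if err != nil {
		panic(err)
	}
	// Pair the DH function with a cipher and hash, as the constructor does.
	cs := noise.NewCipherSuite(dh, noise.CipherChaChaPoly, noise.HashSHA256)
	fmt.Printf("configured cipher suite: %T\n", cs)
}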
control.go (72 changes)
@@ -19,9 +19,9 @@ import (
 type controlEach func(h *HostInfo)

 type controlHostLister interface {
-QueryVpnAddr(vpnAddr netip.Addr) *HostInfo
+QueryVpnIp(vpnIp netip.Addr) *HostInfo
 ForEachIndex(each controlEach)
-ForEachVpnAddr(each controlEach)
+ForEachVpnIp(each controlEach)
 GetPreferredRanges() []netip.Prefix
 }

@@ -38,15 +38,15 @@ type Control struct {
 }

 type ControlHostInfo struct {
-VpnAddrs []netip.Addr `json:"vpnAddrs"`
+VpnIp netip.Addr `json:"vpnIp"`
 LocalIndex uint32 `json:"localIndex"`
 RemoteIndex uint32 `json:"remoteIndex"`
 RemoteAddrs []netip.AddrPort `json:"remoteAddrs"`
-Cert cert.Certificate `json:"cert"`
+Cert *cert.NebulaCertificate `json:"cert"`
 MessageCounter uint64 `json:"messageCounter"`
 CurrentRemote netip.AddrPort `json:"currentRemote"`
 CurrentRelaysToMe []netip.Addr `json:"currentRelaysToMe"`
 CurrentRelaysThroughMe []netip.Addr `json:"currentRelaysThroughMe"`
 }

 // Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
@@ -134,17 +134,15 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
 }

 // GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
-func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) cert.Certificate {
-if c.f.myVpnAddrsTable.Contains(vpnIp) {
-// Only returning the default certificate since its impossible
-// for any other host but ourselves to have more than 1
-return c.f.pki.getCertState().GetDefaultCertificate().Copy()
+func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) *cert.NebulaCertificate {
+if c.f.myVpnNet.Addr() == vpnIp {
+return c.f.pki.GetCertState().Certificate
 }
-hi := c.f.hostMap.QueryVpnAddr(vpnIp)
+hi := c.f.hostMap.QueryVpnIp(vpnIp)
 if hi == nil {
 return nil
 }
-return hi.GetCert().Certificate.Copy()
+return hi.GetCert()
 }

 // CreateTunnel creates a new tunnel to the given vpn ip.
@@ -154,7 +152,7 @@ func (c *Control) CreateTunnel(vpnIp netip.Addr) {

 // PrintTunnel creates a new tunnel to the given vpn ip.
 func (c *Control) PrintTunnel(vpnIp netip.Addr) *ControlHostInfo {
-hi := c.f.hostMap.QueryVpnAddr(vpnIp)
+hi := c.f.hostMap.QueryVpnIp(vpnIp)
 if hi == nil {
 return nil
 }
@@ -171,9 +169,9 @@ func (c *Control) QueryLighthouse(vpnIp netip.Addr) *CacheMap {
 return hi.CopyCache()
 }

-// GetHostInfoByVpnAddr returns a single tunnels hostInfo, or nil if not found
+// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
 // Caller should take care to Unmap() any 4in6 addresses prior to calling.
-func (c *Control) GetHostInfoByVpnAddr(vpnAddr netip.Addr, pending bool) *ControlHostInfo {
+func (c *Control) GetHostInfoByVpnIp(vpnIp netip.Addr, pending bool) *ControlHostInfo {
 var hl controlHostLister
 if pending {
 hl = c.f.handshakeManager
@@ -181,7 +179,7 @@ func (c *Control) GetHostInfoByVpnAddr(vpnAddr netip.Addr, pending bool) *Contro
 hl = c.f.hostMap
 }

-h := hl.QueryVpnAddr(vpnAddr)
+h := hl.QueryVpnIp(vpnIp)
 if h == nil {
 return nil
 }
@@ -193,7 +191,7 @@ func (c *Control) GetHostInfoByVpnAddr(vpnAddr netip.Addr, pending bool) *Contro
 // SetRemoteForTunnel forces a tunnel to use a specific remote
 // Caller should take care to Unmap() any 4in6 addresses prior to calling.
 func (c *Control) SetRemoteForTunnel(vpnIp netip.Addr, addr netip.AddrPort) *ControlHostInfo {
-hostInfo := c.f.hostMap.QueryVpnAddr(vpnIp)
+hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
 if hostInfo == nil {
 return nil
 }
@@ -206,7 +204,7 @@ func (c *Control) SetRemoteForTunnel(vpnIp netip.Addr, addr netip.AddrPort) *Con
 // CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
 // Caller should take care to Unmap() any 4in6 addresses prior to calling.
 func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
-hostInfo := c.f.hostMap.QueryVpnAddr(vpnIp)
+hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
 if hostInfo == nil {
 return false
 }
@@ -230,14 +228,19 @@ func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
 // CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down, optionally you can avoid shutting down lighthouse tunnels
 // the int returned is a count of tunnels closed
 func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
+//TODO: this is probably better as a function in ConnectionManager or HostMap directly
+lighthouses := c.f.lightHouse.GetLighthouses()

 shutdown := func(h *HostInfo) {
-if excludeLighthouses && c.f.lightHouse.IsAnyLighthouseAddr(h.vpnAddrs) {
-return
+if excludeLighthouses {
+if _, ok := lighthouses[h.vpnIp]; ok {
+return
+}
 }
 c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
 c.f.closeTunnel(h)

-c.l.WithField("vpnAddrs", h.vpnAddrs).WithField("udpAddr", h.remote).
+c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote).
 Debug("Sending close tunnel message")
 closed++
 }
@@ -247,7 +250,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
 // Grab the hostMap lock to access the Relays map
 c.f.hostMap.Lock()
 for _, relayingHost := range c.f.hostMap.Relays {
-relayingHosts[relayingHost.vpnAddrs[0]] = relayingHost
+relayingHosts[relayingHost.vpnIp] = relayingHost
 }
 c.f.hostMap.Unlock()

@@ -255,7 +258,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
 // Grab the hostMap lock to access the Hosts map
 c.f.hostMap.Lock()
 for _, relayHost := range c.f.hostMap.Indexes {
-if _, ok := relayingHosts[relayHost.vpnAddrs[0]]; !ok {
+if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
 hostInfos = append(hostInfos, relayHost)
 }
 }
@@ -275,8 +278,9 @@ func (c *Control) Device() overlay.Device {
 }

 func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {

 chi := ControlHostInfo{
-VpnAddrs: make([]netip.Addr, len(h.vpnAddrs)),
+VpnIp: h.vpnIp,
 LocalIndex: h.localIndexId,
 RemoteIndex: h.remoteIndexId,
 RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
@@ -285,16 +289,12 @@ func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
 CurrentRemote: h.remote,
 }

-for i, a := range h.vpnAddrs {
-chi.VpnAddrs[i] = a
-}

 if h.ConnectionState != nil {
 chi.MessageCounter = h.ConnectionState.messageCounter.Load()
 }

 if c := h.GetCert(); c != nil {
-chi.Cert = c.Certificate.Copy()
+chi.Cert = c.Copy()
 }

 return chi
@@ -303,7 +303,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
 func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
 hosts := make([]ControlHostInfo, 0)
 pr := hl.GetPreferredRanges()
-hl.ForEachVpnAddr(func(hostinfo *HostInfo) {
+hl.ForEachVpnIp(func(hostinfo *HostInfo) {
 hosts = append(hosts, copyHostInfo(hostinfo, pr))
 })
 return hosts
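The copyHostInfo changes above keep the exported ControlHostInfo detached from the internal HostInfo, matching the "not exposing core memory to the caller" concern in the tests. A small illustrative sketch of that copy-out pattern; the host and HostSnapshot types here are hypothetical, not nebula's own:

package main

import (
	"fmt"
	"net/netip"
)

type host struct {
	vpnAddrs []netip.Addr
	remote   netip.AddrPort
}

// HostSnapshot is safe to hand to callers: it shares no slices with host.
type HostSnapshot struct {
	VpnAddrs      []netip.Addr
	CurrentRemote netip.AddrPort
}

func snapshot(h *host) HostSnapshot {
	s := HostSnapshot{
		VpnAddrs:      make([]netip.Addr, len(h.vpnAddrs)),
		CurrentRemote: h.remote,
	}
	copy(s.VpnAddrs, h.vpnAddrs)
	return s
}

func main() {
	h := &host{vpnAddrs: []netip.Addr{netip.MustParseAddr("10.128.0.1")}}
	s := snapshot(h)
	s.VpnAddrs[0] = netip.MustParseAddr("10.128.0.2") // does not touch h
	fmt.Println(h.vpnAddrs[0], s.VpnAddrs[0])
}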
@@ -5,6 +5,7 @@ import (
 "net/netip"
 "reflect"
 "testing"
+"time"

 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/cert"
@@ -13,13 +14,10 @@ import (
 )

 func TestControl_GetHostInfoByVpnIp(t *testing.T) {
-//TODO: CERT-V2 with multiple certificate versions we have a problem with this test
-// Some certs versions have different characteristics and each version implements their own Copy() func
-// which means this is not a good place to test for exposing memory
 l := test.NewLogger()
 // Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
 // To properly ensure we are not exposing core memory to the caller
-hm := newHostMap(l)
+hm := newHostMap(l, netip.Prefix{})
 hm.preferredRanges.Store(&[]netip.Prefix{})

 remote1 := netip.MustParseAddrPort("0.0.0.100:4444")
@@ -35,27 +33,42 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 Mask: net.IPMask{255, 255, 255, 0},
 }

-remotes := NewRemoteList([]netip.Addr{netip.IPv4Unspecified()}, nil)
-remotes.unlockedPrependV4(netip.IPv4Unspecified(), netAddrToProtoV4AddrPort(remote1.Addr(), remote1.Port()))
-remotes.unlockedPrependV6(netip.IPv4Unspecified(), netAddrToProtoV6AddrPort(remote2.Addr(), remote2.Port()))
+crt := &cert.NebulaCertificate{
+Details: cert.NebulaCertificateDetails{
+Name: "test",
+Ips: []*net.IPNet{&ipNet},
+Subnets: []*net.IPNet{},
+Groups: []string{"default-group"},
+NotBefore: time.Unix(1, 0),
+NotAfter: time.Unix(2, 0),
+PublicKey: []byte{5, 6, 7, 8},
+IsCA: false,
+Issuer: "the-issuer",
+InvertedGroups: map[string]struct{}{"default-group": {}},
+},
+Signature: []byte{1, 2, 1, 2, 1, 3},
+}

+remotes := NewRemoteList(nil)
+remotes.unlockedPrependV4(netip.IPv4Unspecified(), NewIp4AndPortFromNetIP(remote1.Addr(), remote1.Port()))
+remotes.unlockedPrependV6(netip.IPv4Unspecified(), NewIp6AndPortFromNetIP(remote2.Addr(), remote2.Port()))

 vpnIp, ok := netip.AddrFromSlice(ipNet.IP)
 assert.True(t, ok)

-crt := &dummyCert{}
 hm.unlockedAddHostInfo(&HostInfo{
 remote: remote1,
 remotes: remotes,
 ConnectionState: &ConnectionState{
-peerCert: &cert.CachedCertificate{Certificate: crt},
+peerCert: crt,
 },
 remoteIndexId: 200,
 localIndexId: 201,
-vpnAddrs: []netip.Addr{vpnIp},
+vpnIp: vpnIp,
 relayState: RelayState{
 relays: nil,
-relayForByAddr: map[netip.Addr]*Relay{},
+relayForByIp: map[netip.Addr]*Relay{},
 relayForByIdx: map[uint32]*Relay{},
 },
 }, &Interface{})

@@ -70,11 +83,11 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 },
 remoteIndexId: 200,
 localIndexId: 201,
-vpnAddrs: []netip.Addr{vpnIp2},
+vpnIp: vpnIp2,
 relayState: RelayState{
 relays: nil,
-relayForByAddr: map[netip.Addr]*Relay{},
+relayForByIp: map[netip.Addr]*Relay{},
 relayForByIdx: map[uint32]*Relay{},
 },
 }, &Interface{})

@@ -85,10 +98,10 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 l: logrus.New(),
 }

-thi := c.GetHostInfoByVpnAddr(vpnIp, false)
+thi := c.GetHostInfoByVpnIp(vpnIp, false)

 expectedInfo := ControlHostInfo{
-VpnAddrs: []netip.Addr{vpnIp},
+VpnIp: vpnIp,
 LocalIndex: 201,
 RemoteIndex: 200,
 RemoteAddrs: []netip.AddrPort{remote2, remote1},
@@ -100,17 +113,18 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 }

 // Make sure we don't have any unexpected fields
-assertFields(t, []string{"VpnAddrs", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
+assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
-assert.Equal(t, &expectedInfo, thi)
+assert.EqualValues(t, &expectedInfo, thi)
-test.AssertDeepCopyEqual(t, &expectedInfo, thi)
+//TODO: netip.Addr reuses global memory for zone identifiers which breaks our "no reused memory check" here
+//test.AssertDeepCopyEqual(t, &expectedInfo, thi)

 // Make sure we don't panic if the host info doesn't have a cert yet
 assert.NotPanics(t, func() {
-thi = c.GetHostInfoByVpnAddr(vpnIp2, false)
+thi = c.GetHostInfoByVpnIp(vpnIp2, false)
 })
 }

-func assertFields(t *testing.T, expected []string, actualStruct any) {
+func assertFields(t *testing.T, expected []string, actualStruct interface{}) {
 val := reflect.ValueOf(actualStruct).Elem()
 fields := make([]string, val.NumField())
 for i := 0; i < val.NumField(); i++ {
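The assertFields helper above uses reflection to pin down a struct's exported field set so renames get caught by the test. A self-contained sketch of the same idea; the struct and field names here are placeholders only:

package main

import (
	"fmt"
	"reflect"
)

type controlHostInfo struct {
	VpnAddrs    []string
	LocalIndex  uint32
	RemoteIndex uint32
}

// fieldNames lists a struct's field names, the same way the test compares
// them against an expected slice.
func fieldNames(v any) []string {
	val := reflect.ValueOf(v).Elem()
	names := make([]string, val.NumField())
	for i := 0; i < val.NumField(); i++ {
		names[i] = val.Type().Field(i).Name
	}
	return names
}

func main() {
	fmt.Println(fieldNames(&controlHostInfo{})) // [VpnAddrs LocalIndex RemoteIndex]
}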
@@ -6,6 +6,8 @@ package nebula
 import (
 "net/netip"

+"github.com/slackhq/nebula/cert"

 "github.com/google/gopacket"
 "github.com/google/gopacket/layers"
 "github.com/slackhq/nebula/header"
@@ -49,15 +51,15 @@ func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType,
 // This is necessary if you did not configure static hosts or are not running a lighthouse
 func (c *Control) InjectLightHouseAddr(vpnIp netip.Addr, toAddr netip.AddrPort) {
 c.f.lightHouse.Lock()
-remoteList := c.f.lightHouse.unlockedGetRemoteList([]netip.Addr{vpnIp})
+remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
 remoteList.Lock()
 defer remoteList.Unlock()
 c.f.lightHouse.Unlock()

 if toAddr.Addr().Is4() {
-remoteList.unlockedPrependV4(vpnIp, netAddrToProtoV4AddrPort(toAddr.Addr(), toAddr.Port()))
+remoteList.unlockedPrependV4(vpnIp, NewIp4AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
 } else {
-remoteList.unlockedPrependV6(vpnIp, netAddrToProtoV6AddrPort(toAddr.Addr(), toAddr.Port()))
+remoteList.unlockedPrependV6(vpnIp, NewIp6AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
 }
 }

@@ -65,12 +67,12 @@ func (c *Control) InjectLightHouseAddr(vpnIp netip.Addr, toAddr netip.AddrPort)
 // This is necessary to inform an initiator of possible relays for communicating with a responder
 func (c *Control) InjectRelays(vpnIp netip.Addr, relayVpnIps []netip.Addr) {
 c.f.lightHouse.Lock()
-remoteList := c.f.lightHouse.unlockedGetRemoteList([]netip.Addr{vpnIp})
+remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
 remoteList.Lock()
 defer remoteList.Unlock()
 c.f.lightHouse.Unlock()

-remoteList.unlockedSetRelay(vpnIp, relayVpnIps)
+remoteList.unlockedSetRelay(vpnIp, vpnIp, relayVpnIps)
 }

 // GetFromTun will pull a packet off the tun side of nebula
@@ -97,42 +99,21 @@ func (c *Control) InjectUDPPacket(p *udp.Packet) {
 }

 // InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
-func (c *Control) InjectTunUDPPacket(toAddr netip.Addr, toPort uint16, fromAddr netip.Addr, fromPort uint16, data []byte) {
-serialize := make([]gopacket.SerializableLayer, 0)
-var netLayer gopacket.NetworkLayer
-if toAddr.Is6() {
-if !fromAddr.Is6() {
-panic("Cant send ipv6 to ipv4")
-}
-ip := &layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolUDP,
-SrcIP: fromAddr.Unmap().AsSlice(),
-DstIP: toAddr.Unmap().AsSlice(),
-}
-serialize = append(serialize, ip)
-netLayer = ip
-} else {
-if !fromAddr.Is4() {
-panic("Cant send ipv4 to ipv6")
-}

-ip := &layers.IPv4{
-Version: 4,
-TTL: 64,
-Protocol: layers.IPProtocolUDP,
-SrcIP: fromAddr.Unmap().AsSlice(),
-DstIP: toAddr.Unmap().AsSlice(),
-}
-serialize = append(serialize, ip)
-netLayer = ip
+func (c *Control) InjectTunUDPPacket(toIp netip.Addr, toPort uint16, fromPort uint16, data []byte) {
+//TODO: IPV6-WORK
+ip := layers.IPv4{
+Version: 4,
+TTL: 64,
+Protocol: layers.IPProtocolUDP,
+SrcIP: c.f.inside.Cidr().Addr().Unmap().AsSlice(),
+DstIP: toIp.Unmap().AsSlice(),
 }

 udp := layers.UDP{
 SrcPort: layers.UDPPort(fromPort),
 DstPort: layers.UDPPort(toPort),
 }
-err := udp.SetNetworkLayerForChecksum(netLayer)
+err := udp.SetNetworkLayerForChecksum(&ip)
 if err != nil {
 panic(err)
 }
@@ -142,9 +123,7 @@ func (c *Control) InjectTunUDPPacket(toAddr netip.Addr, toPort uint16, fromAddr
 ComputeChecksums: true,
 FixLengths: true,
 }
-serialize = append(serialize, &udp, gopacket.Payload(data))
-err = gopacket.SerializeLayers(buffer, opt, serialize...)
+err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload(data))
 if err != nil {
 panic(err)
 }
@@ -152,8 +131,8 @@ func (c *Control) InjectTunUDPPacket(toAddr netip.Addr, toPort uint16, fromAddr
 c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
 }

-func (c *Control) GetVpnAddrs() []netip.Addr {
-return c.f.myVpnAddrs
+func (c *Control) GetVpnIp() netip.Addr {
+return c.f.myVpnNet.Addr()
 }

 func (c *Control) GetUDPAddr() netip.AddrPort {
@@ -161,7 +140,7 @@ func (c *Control) GetUDPAddr() netip.AddrPort {
 }

 func (c *Control) KillPendingTunnel(vpnIp netip.Addr) bool {
-hostinfo := c.f.handshakeManager.QueryVpnAddr(vpnIp)
+hostinfo := c.f.handshakeManager.QueryVpnIp(vpnIp)
 if hostinfo == nil {
 return false
 }
@@ -174,8 +153,8 @@ func (c *Control) GetHostmap() *HostMap {
 return c.f.hostMap
 }

-func (c *Control) GetCertState() *CertState {
-return c.f.pki.getCertState()
+func (c *Control) GetCert() *cert.NebulaCertificate {
+return c.f.pki.GetCertState().Certificate
 }

 func (c *Control) ReHandshake(vpnIp netip.Addr) {
dns_server.go (118 changes)
@@ -8,7 +8,6 @@ import (
 "strings"
 "sync"

-"github.com/gaissmai/bart"
 "github.com/miekg/dns"
 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/config"
@@ -22,39 +21,24 @@ var dnsAddr string

 type dnsRecords struct {
 sync.RWMutex
-l *logrus.Logger
-dnsMap4 map[string]netip.Addr
-dnsMap6 map[string]netip.Addr
-hostMap *HostMap
-myVpnAddrsTable *bart.Lite
+dnsMap map[string]string
+hostMap *HostMap
 }

-func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords {
+func newDnsRecords(hostMap *HostMap) *dnsRecords {
 return &dnsRecords{
-l: l,
-dnsMap4: make(map[string]netip.Addr),
-dnsMap6: make(map[string]netip.Addr),
-hostMap: hostMap,
-myVpnAddrsTable: cs.myVpnAddrsTable,
+dnsMap: make(map[string]string),
+hostMap: hostMap,
 }
 }

-func (d *dnsRecords) Query(q uint16, data string) netip.Addr {
-data = strings.ToLower(data)
+func (d *dnsRecords) Query(data string) string {
 d.RLock()
 defer d.RUnlock()
-switch q {
-case dns.TypeA:
-if r, ok := d.dnsMap4[data]; ok {
-return r
-}
-case dns.TypeAAAA:
-if r, ok := d.dnsMap6[data]; ok {
-return r
-}
+if r, ok := d.dnsMap[strings.ToLower(data)]; ok {
+return r
 }
-return netip.Addr{}
+return ""
 }

 func (d *dnsRecords) QueryCert(data string) string {
@@ -63,7 +47,7 @@ func (d *dnsRecords) QueryCert(data string) string {
 return ""
 }

-hostinfo := d.hostMap.QueryVpnAddr(ip)
+hostinfo := d.hostMap.QueryVpnIp(ip)
 if hostinfo == nil {
 return ""
 }
@@ -73,69 +57,43 @@ func (d *dnsRecords) QueryCert(data string) string {
 return ""
 }

-b, err := q.Certificate.MarshalJSON()
-if err != nil {
-return ""
-}
-return string(b)
+cert := q.Details
+c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAfter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
+return c
 }

-// Add adds the first IPv4 and IPv6 address that appears in `addresses` as the record for `host`
-func (d *dnsRecords) Add(host string, addresses []netip.Addr) {
-host = strings.ToLower(host)
+func (d *dnsRecords) Add(host, data string) {
 d.Lock()
 defer d.Unlock()
-haveV4 := false
-haveV6 := false
-for _, addr := range addresses {
-if addr.Is4() && !haveV4 {
-d.dnsMap4[host] = addr
-haveV4 = true
-} else if addr.Is6() && !haveV6 {
-d.dnsMap6[host] = addr
-haveV6 = true
-}
-if haveV4 && haveV6 {
-break
-}
-}
+d.dnsMap[strings.ToLower(host)] = data
 }

-func (d *dnsRecords) isSelfNebulaOrLocalhost(addr string) bool {
-a, _, _ := net.SplitHostPort(addr)
-b, err := netip.ParseAddr(a)
-if err != nil {
-return false
-}

-if b.IsLoopback() {
-return true
-}

-//if we found it in this table, it's good
-return d.myVpnAddrsTable.Contains(b)
-}

-func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {
+func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
 for _, q := range m.Question {
 switch q.Qtype {
-case dns.TypeA, dns.TypeAAAA:
-qType := dns.TypeToString[q.Qtype]
-d.l.Debugf("Query for %s %s", qType, q.Name)
-ip := d.Query(q.Qtype, q.Name)
-if ip.IsValid() {
-rr, err := dns.NewRR(fmt.Sprintf("%s %s %s", q.Name, qType, ip))
+case dns.TypeA:
+l.Debugf("Query for A %s", q.Name)
+ip := dnsR.Query(q.Name)
+if ip != "" {
+rr, err := dns.NewRR(fmt.Sprintf("%s A %s", q.Name, ip))
 if err == nil {
 m.Answer = append(m.Answer, rr)
 }
 }
 case dns.TypeTXT:
-// We only answer these queries from nebula nodes or localhost
-if !d.isSelfNebulaOrLocalhost(w.RemoteAddr().String()) {
+a, _, _ := net.SplitHostPort(w.RemoteAddr().String())
+b, err := netip.ParseAddr(a)
+if err != nil {
 return
 }
-d.l.Debugf("Query for TXT %s", q.Name)
-ip := d.QueryCert(q.Name)
+// We don't answer these queries from non nebula nodes or localhost
+//l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR)
+if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" {
+return
+}
+l.Debugf("Query for TXT %s", q.Name)
+ip := dnsR.QueryCert(q.Name)
 if ip != "" {
 rr, err := dns.NewRR(fmt.Sprintf("%s TXT %s", q.Name, ip))
 if err == nil {
@@ -150,24 +108,26 @@ func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {
 }
 }

-func (d *dnsRecords) handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) {
+func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
 m := new(dns.Msg)
 m.SetReply(r)
 m.Compress = false

 switch r.Opcode {
 case dns.OpcodeQuery:
-d.parseQuery(m, w)
+parseQuery(l, m, w)
 }

 w.WriteMsg(m)
 }

-func dnsMain(l *logrus.Logger, cs *CertState, hostMap *HostMap, c *config.C) func() {
-dnsR = newDnsRecords(l, cs, hostMap)
+func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
+dnsR = newDnsRecords(hostMap)

 // attach request handler func
-dns.HandleFunc(".", dnsR.handleDnsRequest)
+dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
+handleDnsRequest(l, w, r)
+})

 c.RegisterReloadCallback(func(c *config.C) {
 reloadDns(l, c)
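Both sides of the dns_server.go diff answer queries with github.com/miekg/dns; one side keeps a map per record type so AAAA can be served alongside A. A hedged, standalone sketch of that lookup shape, with made-up host names and addresses:

package main

import (
	"fmt"
	"net/netip"
	"strings"

	"github.com/miekg/dns"
)

var (
	dnsMap4 = map[string]netip.Addr{"host1.example.": netip.MustParseAddr("10.128.0.1")}
	dnsMap6 = map[string]netip.Addr{"host1.example.": netip.MustParseAddr("fd01::1")}
)

// answer fills in A or AAAA records for each question, skipping unknown names.
func answer(m *dns.Msg) {
	for _, q := range m.Question {
		name := strings.ToLower(q.Name)
		var addr netip.Addr
		switch q.Qtype {
		case dns.TypeA:
			addr = dnsMap4[name]
		case dns.TypeAAAA:
			addr = dnsMap6[name]
		}
		if !addr.IsValid() {
			continue
		}
		rr, err := dns.NewRR(fmt.Sprintf("%s %s %s", q.Name, dns.TypeToString[q.Qtype], addr))
		if err == nil {
			m.Answer = append(m.Answer, rr)
		}
	}
}

func main() {
	m := new(dns.Msg)
	m.SetQuestion("host1.example.", dns.TypeAAAA)
	answer(m)
	fmt.Println(m.Answer)
}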
@@ -1,61 +1,46 @@
 package nebula

 import (
-"net/netip"
 "testing"

 "github.com/miekg/dns"
-"github.com/sirupsen/logrus"
 "github.com/slackhq/nebula/config"
 "github.com/stretchr/testify/assert"
 )

 func TestParsequery(t *testing.T) {
-l := logrus.New()
+//TODO: This test is basically pointless
 hostMap := &HostMap{}
-ds := newDnsRecords(l, &CertState{}, hostMap)
-addrs := []netip.Addr{
-netip.MustParseAddr("1.2.3.4"),
-netip.MustParseAddr("1.2.3.5"),
-netip.MustParseAddr("fd01::24"),
-netip.MustParseAddr("fd01::25"),
-}
-ds.Add("test.com.com", addrs)
+ds := newDnsRecords(hostMap)
+ds.Add("test.com.com", "1.2.3.4")

-m := &dns.Msg{}
+m := new(dns.Msg)
 m.SetQuestion("test.com.com", dns.TypeA)
-ds.parseQuery(m, nil)
-assert.NotNil(t, m.Answer)
-assert.Equal(t, "1.2.3.4", m.Answer[0].(*dns.A).A.String())

-m = &dns.Msg{}
-m.SetQuestion("test.com.com", dns.TypeAAAA)
-ds.parseQuery(m, nil)
-assert.NotNil(t, m.Answer)
-assert.Equal(t, "fd01::24", m.Answer[0].(*dns.AAAA).AAAA.String())
+//parseQuery(m)
 }

 func Test_getDnsServerAddr(t *testing.T) {
 c := config.NewC(nil)

-c.Settings["lighthouse"] = map[string]any{
-"dns": map[string]any{
+c.Settings["lighthouse"] = map[interface{}]interface{}{
+"dns": map[interface{}]interface{}{
 "host": "0.0.0.0",
 "port": "1",
 },
 }
 assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))

-c.Settings["lighthouse"] = map[string]any{
-"dns": map[string]any{
+c.Settings["lighthouse"] = map[interface{}]interface{}{
+"dns": map[interface{}]interface{}{
 "host": "::",
 "port": "1",
 },
 }
 assert.Equal(t, "[::]:1", getDnsServerAddr(c))

-c.Settings["lighthouse"] = map[string]any{
-"dns": map[string]any{
+c.Settings["lighthouse"] = map[interface{}]interface{}{
+"dns": map[interface{}]interface{}{
 "host": "[::]",
 "port": "1",
 },
@@ -63,8 +48,8 @@ func Test_getDnsServerAddr(t *testing.T) {
 assert.Equal(t, "[::]:1", getDnsServerAddr(c))

 // Make sure whitespace doesn't mess us up
-c.Settings["lighthouse"] = map[string]any{
-"dns": map[string]any{
+c.Settings["lighthouse"] = map[interface{}]interface{}{
+"dns": map[interface{}]interface{}{
 "host": "[::] ",
 "port": "1",
 },
(File diff suppressed because it is too large)

e2e/helpers.go (new file, 125 lines)
@@ -0,0 +1,125 @@
+package e2e
+
+import (
+"crypto/rand"
+"io"
+"net"
+"net/netip"
+"time"
+
+"github.com/slackhq/nebula/cert"
+"golang.org/x/crypto/curve25519"
+"golang.org/x/crypto/ed25519"
+)
+
+// NewTestCaCert will generate a CA cert
+func NewTestCaCert(before, after time.Time, ips, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+pub, priv, err := ed25519.GenerateKey(rand.Reader)
+if before.IsZero() {
+before = time.Now().Add(time.Second * -60).Round(time.Second)
+}
+if after.IsZero() {
+after = time.Now().Add(time.Second * 60).Round(time.Second)
+}
+
+nc := &cert.NebulaCertificate{
+Details: cert.NebulaCertificateDetails{
+Name: "test ca",
+NotBefore: time.Unix(before.Unix(), 0),
+NotAfter: time.Unix(after.Unix(), 0),
+PublicKey: pub,
+IsCA: true,
+InvertedGroups: make(map[string]struct{}),
+},
+}
+
+if len(ips) > 0 {
+nc.Details.Ips = make([]*net.IPNet, len(ips))
+for i, ip := range ips {
+nc.Details.Ips[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
+}
+}
+
+if len(subnets) > 0 {
+nc.Details.Subnets = make([]*net.IPNet, len(subnets))
+for i, ip := range subnets {
+nc.Details.Ips[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
+}
+}
+
+if len(groups) > 0 {
+nc.Details.Groups = groups
+}
+
+err = nc.Sign(cert.Curve_CURVE25519, priv)
+if err != nil {
+panic(err)
+}
+
+pem, err := nc.MarshalToPEM()
+if err != nil {
+panic(err)
+}
+
+return nc, pub, priv, pem
+}
+
+// NewTestCert will generate a signed certificate with the provided details.
+// Expiry times are defaulted if you do not pass them in
+func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip netip.Prefix, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+issuer, err := ca.Sha256Sum()
+if err != nil {
+panic(err)
+}
+
+if before.IsZero() {
+before = time.Now().Add(time.Second * -60).Round(time.Second)
+}
+
+if after.IsZero() {
+after = time.Now().Add(time.Second * 60).Round(time.Second)
+}
+
+pub, rawPriv := x25519Keypair()
+ipb := ip.Addr().AsSlice()
+nc := &cert.NebulaCertificate{
+Details: cert.NebulaCertificateDetails{
+Name: name,
+Ips: []*net.IPNet{{IP: ipb[:], Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}},
+//Subnets: subnets,
+Groups: groups,
+NotBefore: time.Unix(before.Unix(), 0),
+NotAfter: time.Unix(after.Unix(), 0),
+PublicKey: pub,
+IsCA: false,
+Issuer: issuer,
+InvertedGroups: make(map[string]struct{}),
+},
+}
+
+err = nc.Sign(ca.Details.Curve, key)
+if err != nil {
+panic(err)
+}
+
+pem, err := nc.MarshalToPEM()
+if err != nil {
+panic(err)
+}
+
+return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
+}
+
+func x25519Keypair() ([]byte, []byte) {
+privkey := make([]byte, 32)
+if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
+panic(err)
+}
+
+pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
+if err != nil {
+panic(err)
+}
+
+return pubkey, privkey
+}
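The new e2e/helpers.go derives an X25519 keypair by hand. The same routine extracted as a runnable sketch; returning errors instead of panicking is the only liberty taken:

package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/curve25519"
)

// x25519Keypair mirrors the helper above: a random 32-byte scalar and the
// matching public key on Curve25519.
func x25519Keypair() (pub, priv []byte, err error) {
	priv = make([]byte, 32)
	if _, err = io.ReadFull(rand.Reader, priv); err != nil {
		return nil, nil, err
	}
	pub, err = curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		return nil, nil, err
	}
	return pub, priv, nil
}

func main() {
	pub, priv, err := x25519Keypair()
	if err != nil {
		panic(err)
	}
	fmt.Printf("pub: %x\npriv: %x\n", pub, priv)
}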
@@ -8,7 +8,6 @@ import (
 "io"
 "net/netip"
 "os"
-"strings"
 "testing"
 "time"

@@ -18,47 +17,36 @@ import (
 "github.com/sirupsen/logrus"
 "github.com/slackhq/nebula"
 "github.com/slackhq/nebula/cert"
-"github.com/slackhq/nebula/cert_test"
 "github.com/slackhq/nebula/config"
 "github.com/slackhq/nebula/e2e/router"
 "github.com/stretchr/testify/assert"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"
 )

-type m = map[string]any
+type m map[string]interface{}

 // newSimpleServer creates a nebula instance with many assumptions
-func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name string, sVpnNetworks string, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
+func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, sVpnIpNet string, overrides m) (*nebula.Control, netip.Prefix, netip.AddrPort, *config.C) {
 l := NewTestLogger()

-var vpnNetworks []netip.Prefix
-for _, sn := range strings.Split(sVpnNetworks, ",") {
-vpnIpNet, err := netip.ParsePrefix(strings.TrimSpace(sn))
-if err != nil {
-panic(err)
-}
-vpnNetworks = append(vpnNetworks, vpnIpNet)
-}

-if len(vpnNetworks) == 0 {
-panic("no vpn networks")
+vpnIpNet, err := netip.ParsePrefix(sVpnIpNet)
+if err != nil {
+panic(err)
 }

 var udpAddr netip.AddrPort
-if vpnNetworks[0].Addr().Is4() {
-budpIp := vpnNetworks[0].Addr().As4()
+if vpnIpNet.Addr().Is4() {
+budpIp := vpnIpNet.Addr().As4()
 budpIp[1] -= 128
 udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
 } else {
-budpIp := vpnNetworks[0].Addr().As16()
-// beef for funsies
-budpIp[2] = 190
-budpIp[3] = 239
+budpIp := vpnIpNet.Addr().As16()
+budpIp[13] -= 128
 udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
 }
-_, _, myPrivKey, myPEM := cert_test.NewTestCert(v, cert.Curve_CURVE25519, caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnNetworks, nil, []string{})
+_, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})

-caB, err := caCrt.MarshalPEM()
+caB, err := caCrt.MarshalToPEM()
 if err != nil {
 panic(err)
 }
@@ -100,16 +88,11 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
 }

 if overrides != nil {
-final := m{}
-err = mergo.Merge(&final, overrides, mergo.WithAppendSlice)
+err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice)
 if err != nil {
 panic(err)
 }
-err = mergo.Merge(&final, mc, mergo.WithAppendSlice)
-if err != nil {
-panic(err)
-}
-mc = final
+mc = overrides
 }

 cb, err := yaml.Marshal(mc)
@@ -126,7 +109,7 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
 panic(err)
 }

-return control, vpnNetworks, udpAddr, c
+return control, vpnIpNet, udpAddr, c
 }

 type doneCb func()
@@ -149,28 +132,27 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {

 func assertTunnel(t *testing.T, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control, r *router.R) {
 // Send a packet from them to me
-controlB.InjectTunUDPPacket(vpnIpA, 80, vpnIpB, 90, []byte("Hi from B"))
+controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
 bPacket := r.RouteForAllUntilTxTun(controlA)
 assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)

 // And once more from me to them
-controlA.InjectTunUDPPacket(vpnIpB, 80, vpnIpA, 90, []byte("Hello from A"))
+controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
 aPacket := r.RouteForAllUntilTxTun(controlB)
 assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
 }

-func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnNetsA, vpnNetsB []netip.Prefix, controlA, controlB *nebula.Control) {
+func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control) {
 // Get both host infos
-//TODO: CERT-V2 we may want to loop over each vpnAddr and assert all the things
-hBinA := controlA.GetHostInfoByVpnAddr(vpnNetsB[0].Addr(), false)
-assert.NotNil(t, hBinA, "Host B was not found by vpnAddr in controlA")
+hBinA := controlA.GetHostInfoByVpnIp(vpnIpB, false)
+assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")

-hAinB := controlB.GetHostInfoByVpnAddr(vpnNetsA[0].Addr(), false)
-assert.NotNil(t, hAinB, "Host A was not found by vpnAddr in controlB")
+hAinB := controlB.GetHostInfoByVpnIp(vpnIpA, false)
+assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")

 // Check that both vpn and real addr are correct
-assert.EqualValues(t, getAddrs(vpnNetsB), hBinA.VpnAddrs, "Host B VpnIp is wrong in control A")
-assert.EqualValues(t, getAddrs(vpnNetsA), hAinB.VpnAddrs, "Host A VpnIp is wrong in control B")
+assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
+assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")

 assert.Equal(t, addrB, hBinA.CurrentRemote, "Host B remote is wrong in control A")
 assert.Equal(t, addrA, hAinB.CurrentRemote, "Host A remote is wrong in control B")
@@ -178,36 +160,25 @@ func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnNetsA, vpn
 // Check that our indexes match
 assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
 assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index")

+//TODO: Would be nice to assert this memory
+//checkIndexes := func(name string, hm *HostMap, hi *HostInfo) {
+// hBbyIndex := hmA.Indexes[hBinA.localIndexId]
+// assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name)
+// assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name)
+//
+// //TODO: remote indexes are susceptible to collision
+// hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId]
+// assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name)
+// assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point to the right host info", name)
+//}
+//
+//// Check hostmap indexes too
+//checkIndexes("hmA", hmA, hBinA)
+//checkIndexes("hmB", hmB, hAinB)
 }

 func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
-if toIp.Is6() {
-assertUdpPacket6(t, expected, b, fromIp, toIp, fromPort, toPort)
-} else {
-assertUdpPacket4(t, expected, b, fromIp, toIp, fromPort, toPort)
-}
-}

-func assertUdpPacket6(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
-packet := gopacket.NewPacket(b, layers.LayerTypeIPv6, gopacket.Lazy)
-v6 := packet.Layer(layers.LayerTypeIPv6).(*layers.IPv6)
-assert.NotNil(t, v6, "No ipv6 data found")

-assert.Equal(t, fromIp.AsSlice(), []byte(v6.SrcIP), "Source ip was incorrect")
-assert.Equal(t, toIp.AsSlice(), []byte(v6.DstIP), "Dest ip was incorrect")

-udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
-assert.NotNil(t, udp, "No udp data found")

-assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect")
-assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect")

-data := packet.ApplicationLayer()
-assert.NotNil(t, data)
-assert.Equal(t, expected, data.Payload(), "Data was incorrect")
-}

-func assertUdpPacket4(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
 packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
 v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
 assert.NotNil(t, v4, "No ipv4 data found")
@@ -226,14 +197,6 @@ func assertUdpPacket4(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr,
 assert.Equal(t, expected, data.Payload(), "Data was incorrect")
 }

-func getAddrs(ns []netip.Prefix) []netip.Addr {
-var a []netip.Addr
-for _, n := range ns {
-a = append(a, n.Addr())
-}
-return a
-}

 func NewTestLogger() *logrus.Logger {
 l := logrus.New()

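InjectTunUDPPacket and assertUdpPacket both lean on gopacket serialization. A compact sketch of building a UDP-in-IPv4 packet the same way; the addresses, ports, and payload are arbitrary:

package main

import (
	"fmt"
	"net/netip"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildUDP serializes a UDP datagram inside an IPv4 header, the same shape the
// e2e helpers inject into the test tun device.
func buildUDP(src, dst netip.Addr, srcPort, dstPort uint16, payload []byte) ([]byte, error) {
	ip := layers.IPv4{
		Version:  4,
		TTL:      64,
		Protocol: layers.IPProtocolUDP,
		SrcIP:    src.Unmap().AsSlice(),
		DstIP:    dst.Unmap().AsSlice(),
	}
	udp := layers.UDP{SrcPort: layers.UDPPort(srcPort), DstPort: layers.UDPPort(dstPort)}
	if err := udp.SetNetworkLayerForChecksum(&ip); err != nil {
		return nil, err
	}
	buf := gopacket.NewSerializeBuffer()
	opt := gopacket.SerializeOptions{ComputeChecksums: true, FixLengths: true}
	if err := gopacket.SerializeLayers(buf, opt, &ip, &udp, gopacket.Payload(payload)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	b, err := buildUDP(netip.MustParseAddr("10.128.0.2"), netip.MustParseAddr("10.128.0.1"), 90, 80, []byte("Hi from B"))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b), "bytes")
}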
@@ -58,9 +58,8 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
 var lines []string
 var globalLines []*edge

-crt := c.GetCertState().GetDefaultCertificate()
-clusterName := strings.Trim(crt.Name(), " ")
-clusterVpnIp := crt.Networks()[0].Addr()
+clusterName := strings.Trim(c.GetCert().Details.Name, " ")
+clusterVpnIp := c.GetCert().Details.Ips[0].IP
 r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp)

 hm := c.GetHostmap()
@@ -102,8 +101,8 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
 for _, idx := range indexes {
 hi, ok := hm.Indexes[idx]
 if ok {
-r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnAddrs())
-remoteClusterName := strings.Trim(hi.GetCert().Certificate.Name(), " ")
+r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
+remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
 globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
 _ = hi
 }
@@ -10,8 +10,8 @@ import (
 "os"
 "path/filepath"
 "reflect"
-"regexp"
 "sort"
+"strings"
 "sync"
 "testing"
 "time"
@@ -136,10 +136,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
 panic("Duplicate listen address: " + addr.String())
 }

-for _, vpnAddr := range c.GetVpnAddrs() {
-r.vpnControls[vpnAddr] = c
-}
+r.vpnControls[c.GetVpnIp()] = c

 r.controls[addr] = c
 }

@@ -216,11 +213,11 @@ func (r *R) renderFlow() {
 continue
 }
 participants[addr] = struct{}{}
-sanAddr := normalizeName(addr.String())
+sanAddr := strings.Replace(addr.String(), ":", "-", 1)
 participantsVals = append(participantsVals, sanAddr)
 fmt.Fprintf(
 f, " participant %s as Nebula: %s<br/>UDP: %s\n",
-sanAddr, e.packet.from.GetVpnAddrs(), sanAddr,
+sanAddr, e.packet.from.GetVpnIp(), sanAddr,
 )
 }

@@ -253,9 +250,9 @@ func (r *R) renderFlow() {

 fmt.Fprintf(f,
 " %s%s%s: %s(%s), index %v, counter: %v\n",
-normalizeName(p.from.GetUDPAddr().String()),
+strings.Replace(p.from.GetUDPAddr().String(), ":", "-", 1),
 line,
-normalizeName(p.to.GetUDPAddr().String()),
+strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
 h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
 )
 }
@@ -270,11 +267,6 @@ func (r *R) renderFlow() {
 }
 }

-func normalizeName(s string) string {
-rx := regexp.MustCompile("[\\[\\]\\:]")
-return rx.ReplaceAllLiteralString(s, "_")
-}

 // IgnoreFlow tells the router to stop recording future flows that matches the provided criteria.
 // messageType and subType will target nebula underlay packets while tun will target nebula overlay packets
 // NOTE: This is a very broad system, if you set tun to true then no more tun traffic will be rendered
@@ -311,7 +303,7 @@ func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
 func (r *R) renderHostmaps(title string) {
 c := maps.Values(r.controls)
 sort.SliceStable(c, func(i, j int) bool {
-return c[i].GetVpnAddrs()[0].Compare(c[j].GetVpnAddrs()[0]) > 0
+return c[i].GetVpnIp().Compare(c[j].GetVpnIp()) > 0
 })

 s := renderHostmaps(c...)
@@ -427,11 +419,10 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []
 // Nope, lets push the sender along
 case p := <-udpTx:
 r.Lock()
-a := sender.GetUDPAddr()
-c := r.getControl(a, p.To, p)
+c := r.getControl(sender.GetUDPAddr(), p.To, p)
 if c == nil {
 r.Unlock()
-panic("No control for udp tx " + a.String())
+panic("No control for udp tx")
 }
 fp := r.unlockedInjectFlow(sender, c, p, false)
 c.InjectUDPPacket(p)
@@ -484,11 +475,10 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
 } else {
 // we are a udp tx, route and continue
 p := rx.Interface().(*udp.Packet)
-a := cm[x].GetUDPAddr()
-c := r.getControl(a, p.To, p)
+c := r.getControl(cm[x].GetUDPAddr(), p.To, p)
 if c == nil {
 r.Unlock()
-panic(fmt.Sprintf("No control for udp tx %s", p.To))
+panic("No control for udp tx")
 }
 fp := r.unlockedInjectFlow(cm[x], c, p, false)
 c.InjectUDPPacket(p)
@@ -722,42 +712,30 @@ func (r *R) getControl(fromAddr, toAddr netip.AddrPort, p *udp.Packet) *nebula.C
 }

 func (r *R) formatUdpPacket(p *packet) string {
-var packet gopacket.Packet
-var srcAddr netip.Addr
-packet = gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv6, gopacket.Lazy)
-if packet.ErrorLayer() == nil {
-v6 := packet.Layer(layers.LayerTypeIPv6).(*layers.IPv6)
-if v6 == nil {
-panic("not an ipv6 packet")
-}
-srcAddr, _ = netip.AddrFromSlice(v6.SrcIP)
-} else {
-packet = gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy)
-v6 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
-if v6 == nil {
-panic("not an ipv6 packet")
-}
-srcAddr, _ = netip.AddrFromSlice(v6.SrcIP)
+packet := gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy)
+v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
+if v4 == nil {
+panic("not an ipv4 packet")
 }

 from := "unknown"
+srcAddr, _ := netip.AddrFromSlice(v4.SrcIP)
 if c, ok := r.vpnControls[srcAddr]; ok {
 from = c.GetUDPAddr().String()
 }

-udpLayer := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
-if udpLayer == nil {
+udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
+if udp == nil {
 panic("not a udp packet")
 }

 data := packet.ApplicationLayer()
 return fmt.Sprintf(
 " %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
-normalizeName(from),
+strings.Replace(from, ":", "-", 1),
-normalizeName(p.to.GetUDPAddr().String()),
+strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
-udpLayer.SrcPort,
+udp.SrcPort,
-udpLayer.DstPort,
+udp.DstPort,
 string(data.Payload()),
 )
 }
@ -7,21 +7,19 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/slackhq/nebula/cert"
|
|
||||||
"github.com/slackhq/nebula/cert_test"
|
|
||||||
"github.com/slackhq/nebula/e2e/router"
|
"github.com/slackhq/nebula/e2e/router"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDropInactiveTunnels(t *testing.T) {
|
func TestDropInactiveTunnels(t *testing.T) {
|
||||||
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
|
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
|
||||||
// under ideal conditions
|
// under ideal conditions
|
||||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
|
||||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
|
||||||
|
|
||||||
// Share our underlay information
|
// Share our underlay information
|
||||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
||||||
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
|
||||||
|
|
||||||
// Start the servers
|
// Start the servers
|
||||||
myControl.Start()
|
myControl.Start()
|
||||||
@ -30,7 +28,7 @@ func TestDropInactiveTunnels(t *testing.T) {
|
|||||||
r := router.NewR(t, myControl, theirControl)
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
|
||||||
r.Log("Assert the tunnel between me and them works")
|
r.Log("Assert the tunnel between me and them works")
|
||||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
|
||||||
|
|
||||||
r.Log("Go inactive and wait for the tunnels to get dropped")
|
r.Log("Go inactive and wait for the tunnels to get dropped")
|
||||||
waitStart := time.Now()
|
waitStart := time.Now()
|
||||||
|
|||||||
@ -13,12 +13,6 @@ pki:
|
|||||||
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
|
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
|
||||||
#disconnect_invalid: true
|
#disconnect_invalid: true
|
||||||
|
|
||||||
# initiating_version controls which certificate version is used when initiating handshakes.
|
|
||||||
# This setting only applies if both a v1 and a v2 certificate are configured, in which case it will default to `1`.
|
|
||||||
# Once all hosts in the mesh are configured with both a v1 and v2 certificate then this should be changed to `2`.
|
|
||||||
# After all hosts in the mesh are using a v2 certificate then v1 certificates are no longer needed.
|
|
||||||
# initiating_version: 1
|
|
||||||
|
|
||||||
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
|
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
|
||||||
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
|
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
|
||||||
# The syntax is:
|
# The syntax is:
|
||||||
@ -126,8 +120,8 @@ lighthouse:
|
|||||||
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
||||||
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
||||||
listen:
|
listen:
|
||||||
# To listen on only ipv4, use "0.0.0.0"
|
# To listen on both any ipv4 and ipv6 use "::"
|
||||||
host: "::"
|
host: 0.0.0.0
|
||||||
port: 4242
|
port: 4242
|
||||||
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
||||||
# default is 64, does not support reload
|
# default is 64, does not support reload
|
||||||
@ -144,11 +138,6 @@ listen:
|
|||||||
# valid values: always, never, private
|
# valid values: always, never, private
|
||||||
# This setting is reloadable.
|
# This setting is reloadable.
|
||||||
#send_recv_error: always
|
#send_recv_error: always
|
||||||
# The so_sock option is a Linux-specific feature that allows all outgoing Nebula packets to be tagged with a specific identifier.
|
|
||||||
# This tagging enables IP rule-based filtering. For example, it supports 0.0.0.0/0 unsafe_routes,
|
|
||||||
# allowing for more precise routing decisions based on the packet tags. Default is 0 meaning no mark is set.
|
|
||||||
# This setting is reloadable.
|
|
||||||
#so_mark: 0
|
|
||||||
|
|
||||||
# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
|
# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
|
||||||
# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
|
# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
|
||||||
@ -239,28 +228,7 @@ tun:
|
|||||||
|
|
||||||
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
||||||
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
||||||
# Supports weighted ECMP if you define a list of gateways, this can be used for load balancing or redundancy to hosts outside of nebula
|
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
|
||||||
# NOTES:
|
|
||||||
# * You will only see a single gateway in the routing table if you are not on linux
|
|
||||||
# * If a gateway is not reachable through the overlay another gateway will be selected to send the traffic through, ignoring weights
|
|
||||||
#
|
|
||||||
# unsafe_routes:
|
|
||||||
# # Multiple gateways without defining a weight defaults to a weight of 1, this will balance traffic equally between the three gateways
|
|
||||||
# - route: 192.168.87.0/24
|
|
||||||
# via:
|
|
||||||
# - gateway: 10.0.0.1
|
|
||||||
# - gateway: 10.0.0.2
|
|
||||||
# - gateway: 10.0.0.3
|
|
||||||
# # Multiple gateways with a weight, this will balance traffic accordingly
|
|
||||||
# - route: 192.168.87.0/24
|
|
||||||
# via:
|
|
||||||
# - gateway: 10.0.0.1
|
|
||||||
# weight: 10
|
|
||||||
# - gateway: 10.0.0.2
|
|
||||||
# weight: 5
|
|
||||||
#
|
|
||||||
# NOTE: The nebula certificate of the "via" node(s) *MUST* have the "route" defined as a subnet in its certificate
|
|
||||||
# `via`: single node or list of gateways to use for this route
|
|
||||||
# `mtu`: will default to tun mtu if this option is not specified
|
# `mtu`: will default to tun mtu if this option is not specified
|
||||||
# `metric`: will default to 0 if this option is not specified
|
# `metric`: will default to 0 if this option is not specified
|
||||||
# `install`: will default to true, controls whether this route is installed in the systems routing table.
|
# `install`: will default to true, controls whether this route is installed in the systems routing table.
|
||||||
@ -275,11 +243,8 @@ tun:
|
|||||||
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
|
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
|
||||||
# in nebula configuration files. Default false, not reloadable.
|
# in nebula configuration files. Default false, not reloadable.
|
||||||
#use_system_route_table: false
|
#use_system_route_table: false
|
||||||
# Buffer size for reading routes updates. 0 means default system buffer size. (/proc/sys/net/core/rmem_default).
|
|
||||||
# If using massive routes updates, for example BGP, you may need to increase this value to avoid packet loss.
|
|
||||||
# SO_RCVBUFFORCE is used to avoid having to raise the system wide max
|
|
||||||
#use_system_route_table_buffer_size: 0
|
|
||||||
|
|
||||||
|
# TODO
|
||||||
# Configure logging level
|
# Configure logging level
|
||||||
logging:
|
logging:
|
||||||
# panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
|
# panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
|
||||||
@ -362,11 +327,11 @@ firewall:
|
|||||||
outbound_action: drop
|
outbound_action: drop
|
||||||
inbound_action: drop
|
inbound_action: drop
|
||||||
|
|
||||||
# THIS FLAG IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE. (Defaults to false.)
|
# Controls the default value for local_cidr. Default is true, will be deprecated after v1.9 and defaulted to false.
|
||||||
# This setting only affects nebula hosts exposing unsafe_routes. When set to false, each inbound rule must contain a
|
# This setting only affects nebula hosts with subnets encoded in their certificate. A nebula host acting as an
|
||||||
# `local_cidr` if the intention is to allow traffic to flow to an unsafe route. When set to true, every firewall rule
|
# unsafe router with `default_local_cidr_any: true` will expose their unsafe routes to every inbound rule regardless
|
||||||
# will apply to all configured unsafe_routes regardless of the actual destination of the packet, unless `local_cidr`
|
# of the actual destination for the packet. Setting this to false requires each inbound rule to contain a `local_cidr`
|
||||||
# is explicitly defined. This is usually not the desired behavior and should be avoided!
|
# if the intention is to allow traffic to flow to an unsafe route.
|
||||||
#default_local_cidr_any: false
|
#default_local_cidr_any: false
|
||||||
|
|
||||||
conntrack:
|
conntrack:
|
||||||
@ -383,10 +348,10 @@ firewall:
|
|||||||
# host: `any` or a literal hostname, ie `test-host`
|
# host: `any` or a literal hostname, ie `test-host`
|
||||||
# group: `any` or a literal group name, ie `default-group`
|
# group: `any` or a literal group name, ie `default-group`
|
||||||
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
|
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
|
||||||
# cidr: a remote CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6.
|
# cidr: a remote CIDR, `0.0.0.0/0` is any.
|
||||||
# local_cidr: a local CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6. This can be used to filter destinations when using unsafe_routes.
|
# local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
|
||||||
# By default, this is set to only the VPN (overlay) networks assigned via the certificate networks field unless `default_local_cidr_any` is set to true.
|
# Default is `any` unless the certificate contains subnets and then the default is the ip issued in the certificate
|
||||||
# If there are unsafe_routes present in this config file, `local_cidr` should be set appropriately for the intended us case.
|
# if `default_local_cidr_any` is false, otherwise its `any`.
|
||||||
# ca_name: An issuing CA name
|
# ca_name: An issuing CA name
|
||||||
# ca_sha: An issuing CA shasum
|
# ca_sha: An issuing CA shasum
|
||||||
|
|
||||||
|
|||||||
@ -5,12 +5,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/slackhq/nebula"
|
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
"github.com/slackhq/nebula/overlay"
|
|
||||||
"github.com/slackhq/nebula/service"
|
"github.com/slackhq/nebula/service"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -63,16 +59,7 @@ pki:
|
|||||||
if err := cfg.LoadString(configStr); err != nil {
|
if err := cfg.LoadString(configStr); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
svc, err := service.New(&cfg)
|
||||||
logger := logrus.New()
|
|
||||||
logger.Out = os.Stdout
|
|
||||||
|
|
||||||
ctrl, err := nebula.Main(&cfg, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
svc, err := service.New(ctrl)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
209
firewall.go
209
firewall.go
@ -22,7 +22,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type FirewallInterface interface {
|
type FirewallInterface interface {
|
||||||
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, addr, localAddr netip.Prefix, caName string, caSha string) error
|
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type conn struct {
|
type conn struct {
|
||||||
@ -51,13 +51,10 @@ type Firewall struct {
|
|||||||
UDPTimeout time.Duration //linux: 180s max
|
UDPTimeout time.Duration //linux: 180s max
|
||||||
DefaultTimeout time.Duration //linux: 600s
|
DefaultTimeout time.Duration //linux: 600s
|
||||||
|
|
||||||
// routableNetworks describes the vpn addresses as well as any unsafe networks issued to us in the certificate.
|
// Used to ensure we don't emit local packets for ips we don't own
|
||||||
// The vpn addresses are a full bit match while the unsafe networks only match the prefix
|
localIps *bart.Table[struct{}]
|
||||||
routableNetworks *bart.Lite
|
assignedCIDR netip.Prefix
|
||||||
|
hasSubnets bool
|
||||||
// assignedNetworks is a list of vpn networks assigned to us in the certificate.
|
|
||||||
assignedNetworks []netip.Prefix
|
|
||||||
hasUnsafeNetworks bool
|
|
||||||
|
|
||||||
rules string
|
rules string
|
||||||
rulesVersion uint16
|
rulesVersion uint16
|
||||||
@ -70,9 +67,9 @@ type Firewall struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type firewallMetrics struct {
|
type firewallMetrics struct {
|
||||||
droppedLocalAddr metrics.Counter
|
droppedLocalIP metrics.Counter
|
||||||
droppedRemoteAddr metrics.Counter
|
droppedRemoteIP metrics.Counter
|
||||||
droppedNoRule metrics.Counter
|
droppedNoRule metrics.Counter
|
||||||
}
|
}
|
||||||
|
|
||||||
type FirewallConntrack struct {
|
type FirewallConntrack struct {
|
||||||
@ -125,91 +122,92 @@ type firewallPort map[int32]*FirewallCA
|
|||||||
|
|
||||||
type firewallLocalCIDR struct {
|
type firewallLocalCIDR struct {
|
||||||
Any bool
|
Any bool
|
||||||
LocalCIDR *bart.Lite
|
LocalCIDR *bart.Table[struct{}]
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
|
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
|
||||||
// The certificate provided should be the highest version loaded in memory.
|
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
|
||||||
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c cert.Certificate) *Firewall {
|
|
||||||
//TODO: error on 0 duration
|
//TODO: error on 0 duration
|
||||||
var tmin, tmax time.Duration
|
var min, max time.Duration
|
||||||
|
|
||||||
if tcpTimeout < UDPTimeout {
|
if tcpTimeout < UDPTimeout {
|
||||||
tmin = tcpTimeout
|
min = tcpTimeout
|
||||||
tmax = UDPTimeout
|
max = UDPTimeout
|
||||||
} else {
|
} else {
|
||||||
tmin = UDPTimeout
|
min = UDPTimeout
|
||||||
tmax = tcpTimeout
|
max = tcpTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
if defaultTimeout < tmin {
|
if defaultTimeout < min {
|
||||||
tmin = defaultTimeout
|
min = defaultTimeout
|
||||||
} else if defaultTimeout > tmax {
|
} else if defaultTimeout > max {
|
||||||
tmax = defaultTimeout
|
max = defaultTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
routableNetworks := new(bart.Lite)
|
localIps := new(bart.Table[struct{}])
|
||||||
var assignedNetworks []netip.Prefix
|
var assignedCIDR netip.Prefix
|
||||||
for _, network := range c.Networks() {
|
var assignedSet bool
|
||||||
nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen())
|
for _, ip := range c.Details.Ips {
|
||||||
routableNetworks.Insert(nprefix)
|
//TODO: IPV6-WORK the unmap is a bit unfortunate
|
||||||
assignedNetworks = append(assignedNetworks, network)
|
nip, _ := netip.AddrFromSlice(ip.IP)
|
||||||
|
nip = nip.Unmap()
|
||||||
|
nprefix := netip.PrefixFrom(nip, nip.BitLen())
|
||||||
|
localIps.Insert(nprefix, struct{}{})
|
||||||
|
|
||||||
|
if !assignedSet {
|
||||||
|
// Only grabbing the first one in the cert since any more than that currently has undefined behavior
|
||||||
|
assignedCIDR = nprefix
|
||||||
|
assignedSet = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hasUnsafeNetworks := false
|
for _, n := range c.Details.Subnets {
|
||||||
for _, n := range c.UnsafeNetworks() {
|
nip, _ := netip.AddrFromSlice(n.IP)
|
||||||
routableNetworks.Insert(n)
|
ones, _ := n.Mask.Size()
|
||||||
hasUnsafeNetworks = true
|
nip = nip.Unmap()
|
||||||
|
localIps.Insert(netip.PrefixFrom(nip, ones), struct{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Firewall{
|
return &Firewall{
|
||||||
Conntrack: &FirewallConntrack{
|
Conntrack: &FirewallConntrack{
|
||||||
Conns: make(map[firewall.Packet]*conn),
|
Conns: make(map[firewall.Packet]*conn),
|
||||||
TimerWheel: NewTimerWheel[firewall.Packet](tmin, tmax),
|
TimerWheel: NewTimerWheel[firewall.Packet](min, max),
|
||||||
},
|
},
|
||||||
InRules: newFirewallTable(),
|
InRules: newFirewallTable(),
|
||||||
OutRules: newFirewallTable(),
|
OutRules: newFirewallTable(),
|
||||||
TCPTimeout: tcpTimeout,
|
TCPTimeout: tcpTimeout,
|
||||||
UDPTimeout: UDPTimeout,
|
UDPTimeout: UDPTimeout,
|
||||||
DefaultTimeout: defaultTimeout,
|
DefaultTimeout: defaultTimeout,
|
||||||
routableNetworks: routableNetworks,
|
localIps: localIps,
|
||||||
assignedNetworks: assignedNetworks,
|
assignedCIDR: assignedCIDR,
|
||||||
hasUnsafeNetworks: hasUnsafeNetworks,
|
hasSubnets: len(c.Details.Subnets) > 0,
|
||||||
l: l,
|
l: l,
|
||||||
|
|
||||||
incomingMetrics: firewallMetrics{
|
incomingMetrics: firewallMetrics{
|
||||||
droppedLocalAddr: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_addr", nil),
|
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
|
||||||
droppedRemoteAddr: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_addr", nil),
|
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
|
||||||
droppedNoRule: metrics.GetOrRegisterCounter("firewall.incoming.dropped.no_rule", nil),
|
droppedNoRule: metrics.GetOrRegisterCounter("firewall.incoming.dropped.no_rule", nil),
|
||||||
},
|
},
|
||||||
outgoingMetrics: firewallMetrics{
|
outgoingMetrics: firewallMetrics{
|
||||||
droppedLocalAddr: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.local_addr", nil),
|
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.local_ip", nil),
|
||||||
droppedRemoteAddr: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.remote_addr", nil),
|
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.remote_ip", nil),
|
||||||
droppedNoRule: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.no_rule", nil),
|
droppedNoRule: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.no_rule", nil),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFirewallFromConfig(l *logrus.Logger, cs *CertState, c *config.C) (*Firewall, error) {
|
func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *config.C) (*Firewall, error) {
|
||||||
certificate := cs.getCertificate(cert.Version2)
|
|
||||||
if certificate == nil {
|
|
||||||
certificate = cs.getCertificate(cert.Version1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if certificate == nil {
|
|
||||||
panic("No certificate available to reconfigure the firewall")
|
|
||||||
}
|
|
||||||
|
|
||||||
fw := NewFirewall(
|
fw := NewFirewall(
|
||||||
l,
|
l,
|
||||||
c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12),
|
c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12),
|
||||||
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3),
|
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3),
|
||||||
c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10),
|
c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10),
|
||||||
certificate,
|
nc,
|
||||||
//TODO: max_connections
|
//TODO: max_connections
|
||||||
)
|
)
|
||||||
|
|
||||||
fw.defaultLocalCIDRAny = c.GetBool("firewall.default_local_cidr_any", false)
|
//TODO: Flip to false after v1.9 release
|
||||||
|
fw.defaultLocalCIDRAny = c.GetBool("firewall.default_local_cidr_any", true)
|
||||||
|
|
||||||
inboundAction := c.GetString("firewall.inbound_action", "drop")
|
inboundAction := c.GetString("firewall.inbound_action", "drop")
|
||||||
switch inboundAction {
|
switch inboundAction {
|
||||||
@ -289,7 +287,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
|
|||||||
fp = ft.TCP
|
fp = ft.TCP
|
||||||
case firewall.ProtoUDP:
|
case firewall.ProtoUDP:
|
||||||
fp = ft.UDP
|
fp = ft.UDP
|
||||||
case firewall.ProtoICMP, firewall.ProtoICMPv6:
|
case firewall.ProtoICMP:
|
||||||
fp = ft.ICMP
|
fp = ft.ICMP
|
||||||
case firewall.ProtoAny:
|
case firewall.ProtoAny:
|
||||||
fp = ft.AnyProto
|
fp = ft.AnyProto
|
||||||
@ -331,7 +329,7 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
rs, ok := r.([]any)
|
rs, ok := r.([]interface{})
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("%s failed to parse, should be an array of rules", table)
|
return fmt.Errorf("%s failed to parse, should be an array of rules", table)
|
||||||
}
|
}
|
||||||
@ -423,29 +421,33 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
|
|||||||
|
|
||||||
// Drop returns an error if the packet should be dropped, explaining why. It
|
// Drop returns an error if the packet should be dropped, explaining why. It
|
||||||
// returns nil if the packet should not be dropped.
|
// returns nil if the packet should not be dropped.
|
||||||
func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache) error {
|
func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
|
||||||
// Check if we spoke to this tuple, if we did then allow this packet
|
// Check if we spoke to this tuple, if we did then allow this packet
|
||||||
if f.inConns(fp, h, caPool, localCache) {
|
if f.inConns(fp, h, caPool, localCache) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure remote address matches nebula certificate
|
// Make sure remote address matches nebula certificate
|
||||||
if h.networks != nil {
|
if remoteCidr := h.remoteCidr; remoteCidr != nil {
|
||||||
if !h.networks.Contains(fp.RemoteAddr) {
|
//TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
|
||||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
_, ok := remoteCidr.Lookup(fp.RemoteIP)
|
||||||
|
if !ok {
|
||||||
|
f.metrics(incoming).droppedRemoteIP.Inc(1)
|
||||||
return ErrInvalidRemoteIP
|
return ErrInvalidRemoteIP
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Simple case: Certificate has one address and no unsafe networks
|
// Simple case: Certificate has one IP and no subnets
|
||||||
if h.vpnAddrs[0] != fp.RemoteAddr {
|
if fp.RemoteIP != h.vpnIp {
|
||||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
f.metrics(incoming).droppedRemoteIP.Inc(1)
|
||||||
return ErrInvalidRemoteIP
|
return ErrInvalidRemoteIP
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure we are supposed to be handling this local ip address
|
// Make sure we are supposed to be handling this local ip address
|
||||||
if !f.routableNetworks.Contains(fp.LocalAddr) {
|
//TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
|
||||||
f.metrics(incoming).droppedLocalAddr.Inc(1)
|
_, ok := f.localIps.Lookup(fp.LocalIP)
|
||||||
|
if !ok {
|
||||||
|
f.metrics(incoming).droppedLocalIP.Inc(1)
|
||||||
return ErrInvalidLocalIP
|
return ErrInvalidLocalIP
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -490,7 +492,7 @@ func (f *Firewall) EmitStats() {
|
|||||||
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
|
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache) bool {
|
func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
|
||||||
if localCache != nil {
|
if localCache != nil {
|
||||||
if _, ok := localCache[fp]; ok {
|
if _, ok := localCache[fp]; ok {
|
||||||
return true
|
return true
|
||||||
@ -617,7 +619,7 @@ func (f *Firewall) evict(p firewall.Packet) {
|
|||||||
delete(conntrack.Conns, p)
|
delete(conntrack.Conns, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.CachedCertificate, caPool *cert.CAPool) bool {
|
func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
||||||
if ft.AnyProto.match(p, incoming, c, caPool) {
|
if ft.AnyProto.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@ -631,7 +633,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.CachedC
|
|||||||
if ft.UDP.match(p, incoming, c, caPool) {
|
if ft.UDP.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
case firewall.ProtoICMP, firewall.ProtoICMPv6:
|
case firewall.ProtoICMP:
|
||||||
if ft.ICMP.match(p, incoming, c, caPool) {
|
if ft.ICMP.match(p, incoming, c, caPool) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@ -661,7 +663,7 @@ func (fp firewallPort) addRule(f *Firewall, startPort int32, endPort int32, grou
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.CachedCertificate, caPool *cert.CAPool) bool {
|
func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
||||||
// We don't have any allowed ports, bail
|
// We don't have any allowed ports, bail
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
return false
|
return false
|
||||||
@ -724,7 +726,7 @@ func (fc *FirewallCA) addRule(f *Firewall, groups []string, host string, ip, loc
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fc *FirewallCA) match(p firewall.Packet, c *cert.CachedCertificate, caPool *cert.CAPool) bool {
|
func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
|
||||||
if fc == nil {
|
if fc == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -733,24 +735,24 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.CachedCertificate, caPool
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if t, ok := fc.CAShas[c.Certificate.Issuer()]; ok {
|
if t, ok := fc.CAShas[c.Details.Issuer]; ok {
|
||||||
if t.match(p, c) {
|
if t.match(p, c) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
s, err := caPool.GetCAForCert(c.Certificate)
|
s, err := caPool.GetCAForCert(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return fc.CANames[s.Certificate.Name()].match(p, c)
|
return fc.CANames[s.Details.Name].match(p, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
|
func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
|
||||||
flc := func() *firewallLocalCIDR {
|
flc := func() *firewallLocalCIDR {
|
||||||
return &firewallLocalCIDR{
|
return &firewallLocalCIDR{
|
||||||
LocalCIDR: new(bart.Lite),
|
LocalCIDR: new(bart.Table[struct{}]),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -824,7 +826,7 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip netip.Prefix) boo
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *FirewallRule) match(p firewall.Packet, c *cert.CachedCertificate) bool {
|
func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
|
||||||
if fr == nil {
|
if fr == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -839,7 +841,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.CachedCertificate) bool
|
|||||||
found := false
|
found := false
|
||||||
|
|
||||||
for _, g := range sg.Groups {
|
for _, g := range sg.Groups {
|
||||||
if _, ok := c.InvertedGroups[g]; !ok {
|
if _, ok := c.Details.InvertedGroups[g]; !ok {
|
||||||
found = false
|
found = false
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -853,44 +855,42 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.CachedCertificate) bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
if fr.Hosts != nil {
|
if fr.Hosts != nil {
|
||||||
if flc, ok := fr.Hosts[c.Certificate.Name()]; ok {
|
if flc, ok := fr.Hosts[c.Details.Name]; ok {
|
||||||
if flc.match(p, c) {
|
if flc.match(p, c) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, v := range fr.CIDR.Supernets(netip.PrefixFrom(p.RemoteAddr, p.RemoteAddr.BitLen())) {
|
matched := false
|
||||||
if v.match(p, c) {
|
prefix := netip.PrefixFrom(p.RemoteIP, p.RemoteIP.BitLen())
|
||||||
return true
|
fr.CIDR.EachLookupPrefix(prefix, func(prefix netip.Prefix, val *firewallLocalCIDR) bool {
|
||||||
|
if prefix.Contains(p.RemoteIP) && val.match(p, c) {
|
||||||
|
matched = true
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
}
|
return true
|
||||||
|
})
|
||||||
return false
|
return matched
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
|
func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
|
||||||
if !localIp.IsValid() {
|
if !localIp.IsValid() {
|
||||||
if !f.hasUnsafeNetworks || f.defaultLocalCIDRAny {
|
if !f.hasSubnets || f.defaultLocalCIDRAny {
|
||||||
flc.Any = true
|
flc.Any = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, network := range f.assignedNetworks {
|
localIp = f.assignedCIDR
|
||||||
flc.LocalCIDR.Insert(network)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
|
|
||||||
} else if localIp.Bits() == 0 {
|
} else if localIp.Bits() == 0 {
|
||||||
flc.Any = true
|
flc.Any = true
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
flc.LocalCIDR.Insert(localIp)
|
flc.LocalCIDR.Insert(localIp, struct{}{})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.CachedCertificate) bool {
|
func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
|
||||||
if flc == nil {
|
if flc == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -899,7 +899,8 @@ func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.CachedCertificate
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
return flc.LocalCIDR.Contains(p.LocalAddr)
|
_, ok := flc.LocalCIDR.Lookup(p.LocalIP)
|
||||||
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
type rule struct {
|
type rule struct {
|
||||||
@ -915,15 +916,15 @@ type rule struct {
|
|||||||
CASha string
|
CASha string
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertRule(l *logrus.Logger, p any, table string, i int) (rule, error) {
|
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
||||||
r := rule{}
|
r := rule{}
|
||||||
|
|
||||||
m, ok := p.(map[string]any)
|
m, ok := p.(map[interface{}]interface{})
|
||||||
if !ok {
|
if !ok {
|
||||||
return r, errors.New("could not parse rule")
|
return r, errors.New("could not parse rule")
|
||||||
}
|
}
|
||||||
|
|
||||||
toString := func(k string, m map[string]any) string {
|
toString := func(k string, m map[interface{}]interface{}) string {
|
||||||
v, ok := m[k]
|
v, ok := m[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
return ""
|
return ""
|
||||||
@ -941,7 +942,7 @@ func convertRule(l *logrus.Logger, p any, table string, i int) (rule, error) {
|
|||||||
r.CASha = toString("ca_sha", m)
|
r.CASha = toString("ca_sha", m)
|
||||||
|
|
||||||
// Make sure group isn't an array
|
// Make sure group isn't an array
|
||||||
if v, ok := m["group"].([]any); ok {
|
if v, ok := m["group"].([]interface{}); ok {
|
||||||
if len(v) > 1 {
|
if len(v) > 1 {
|
||||||
return r, errors.New("group should contain a single value, an array with more than one entry was provided")
|
return r, errors.New("group should contain a single value, an array with more than one entry was provided")
|
||||||
}
|
}
|
||||||
|
|||||||
@ -6,22 +6,21 @@ import (
|
|||||||
"net/netip"
|
"net/netip"
|
||||||
)
|
)
|
||||||
|
|
||||||
type m = map[string]any
|
type m map[string]interface{}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
|
ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
|
||||||
ProtoTCP = 6
|
ProtoTCP = 6
|
||||||
ProtoUDP = 17
|
ProtoUDP = 17
|
||||||
ProtoICMP = 1
|
ProtoICMP = 1
|
||||||
ProtoICMPv6 = 58
|
|
||||||
|
|
||||||
PortAny = 0 // Special value for matching `port: any`
|
PortAny = 0 // Special value for matching `port: any`
|
||||||
PortFragment = -1 // Special value for matching `port: fragment`
|
PortFragment = -1 // Special value for matching `port: fragment`
|
||||||
)
|
)
|
||||||
|
|
||||||
type Packet struct {
|
type Packet struct {
|
||||||
LocalAddr netip.Addr
|
LocalIP netip.Addr
|
||||||
RemoteAddr netip.Addr
|
RemoteIP netip.Addr
|
||||||
LocalPort uint16
|
LocalPort uint16
|
||||||
RemotePort uint16
|
RemotePort uint16
|
||||||
Protocol uint8
|
Protocol uint8
|
||||||
@ -30,8 +29,8 @@ type Packet struct {
|
|||||||
|
|
||||||
func (fp *Packet) Copy() *Packet {
|
func (fp *Packet) Copy() *Packet {
|
||||||
return &Packet{
|
return &Packet{
|
||||||
LocalAddr: fp.LocalAddr,
|
LocalIP: fp.LocalIP,
|
||||||
RemoteAddr: fp.RemoteAddr,
|
RemoteIP: fp.RemoteIP,
|
||||||
LocalPort: fp.LocalPort,
|
LocalPort: fp.LocalPort,
|
||||||
RemotePort: fp.RemotePort,
|
RemotePort: fp.RemotePort,
|
||||||
Protocol: fp.Protocol,
|
Protocol: fp.Protocol,
|
||||||
@ -52,8 +51,8 @@ func (fp Packet) MarshalJSON() ([]byte, error) {
|
|||||||
proto = fmt.Sprintf("unknown %v", fp.Protocol)
|
proto = fmt.Sprintf("unknown %v", fp.Protocol)
|
||||||
}
|
}
|
||||||
return json.Marshal(m{
|
return json.Marshal(m{
|
||||||
"LocalAddr": fp.LocalAddr.String(),
|
"LocalIP": fp.LocalIP.String(),
|
||||||
"RemoteAddr": fp.RemoteAddr.String(),
|
"RemoteIP": fp.RemoteIP.String(),
|
||||||
"LocalPort": fp.LocalPort,
|
"LocalPort": fp.LocalPort,
|
||||||
"RemotePort": fp.RemotePort,
|
"RemotePort": fp.RemotePort,
|
||||||
"Protocol": proto,
|
"Protocol": proto,
|
||||||
|
|||||||
529
firewall_test.go
529
firewall_test.go
@ -4,6 +4,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"math"
|
"math"
|
||||||
|
"net"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -13,12 +14,11 @@ import (
|
|||||||
"github.com/slackhq/nebula/firewall"
|
"github.com/slackhq/nebula/firewall"
|
||||||
"github.com/slackhq/nebula/test"
|
"github.com/slackhq/nebula/test"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewFirewall(t *testing.T) {
|
func TestNewFirewall(t *testing.T) {
|
||||||
l := test.NewLogger()
|
l := test.NewLogger()
|
||||||
c := &dummyCert{}
|
c := &cert.NebulaCertificate{}
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
conntrack := fw.Conntrack
|
conntrack := fw.Conntrack
|
||||||
assert.NotNil(t, conntrack)
|
assert.NotNil(t, conntrack)
|
||||||
@ -60,67 +60,67 @@ func TestFirewall_AddRule(t *testing.T) {
|
|||||||
ob := &bytes.Buffer{}
|
ob := &bytes.Buffer{}
|
||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
|
|
||||||
c := &dummyCert{}
|
c := &cert.NebulaCertificate{}
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
assert.NotNil(t, fw.InRules)
|
assert.NotNil(t, fw.InRules)
|
||||||
assert.NotNil(t, fw.OutRules)
|
assert.NotNil(t, fw.OutRules)
|
||||||
|
|
||||||
ti, err := netip.ParsePrefix("1.2.3.4/32")
|
ti, err := netip.ParsePrefix("1.2.3.4/32")
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
// An empty rule is any
|
// An empty rule is any
|
||||||
assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
|
assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
||||||
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
assert.Nil(t, fw.InRules.UDP[1].Any.Any)
|
assert.Nil(t, fw.InRules.UDP[1].Any.Any)
|
||||||
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0].Groups, "g1")
|
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0].Groups, "g1")
|
||||||
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
assert.Nil(t, fw.InRules.ICMP[1].Any.Any)
|
assert.Nil(t, fw.InRules.ICMP[1].Any.Any)
|
||||||
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
||||||
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, netip.Prefix{}, "", ""))
|
||||||
assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
|
assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||||
_, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
|
_, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
|
||||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
|
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||||
_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
|
_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
|
||||||
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", "ca-sha"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", "ca-sha"))
|
||||||
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
||||||
|
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
anyIp, err := netip.ParsePrefix("0.0.0.0/0")
|
anyIp, err := netip.ParsePrefix("0.0.0.0/0")
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
|
||||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
||||||
|
|
||||||
// Test error conditions
|
// Test error conditions
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||||
require.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
require.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop(t *testing.T) {
|
func TestFirewall_Drop(t *testing.T) {
|
||||||
@ -129,74 +129,79 @@ func TestFirewall_Drop(t *testing.T) {
|
|||||||
l.SetOutput(ob)
|
l.SetOutput(ob)
|
||||||
|
|
||||||
p := firewall.Packet{
|
p := firewall.Packet{
|
||||||
LocalAddr: netip.MustParseAddr("1.2.3.4"),
|
LocalIP: netip.MustParseAddr("1.2.3.4"),
|
||||||
RemoteAddr: netip.MustParseAddr("1.2.3.4"),
|
RemoteIP: netip.MustParseAddr("1.2.3.4"),
|
||||||
LocalPort: 10,
|
LocalPort: 10,
|
||||||
RemotePort: 90,
|
RemotePort: 90,
|
||||||
Protocol: firewall.ProtoUDP,
|
Protocol: firewall.ProtoUDP,
|
||||||
Fragment: false,
|
Fragment: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
c := dummyCert{
|
ipNet := net.IPNet{
|
||||||
name: "host1",
|
IP: net.IPv4(1, 2, 3, 4),
|
||||||
networks: []netip.Prefix{netip.MustParsePrefix("1.2.3.4/24")},
|
Mask: net.IPMask{255, 255, 255, 0},
|
||||||
groups: []string{"default-group"},
|
}
|
||||||
issuer: "signer-shasum",
|
|
||||||
|
c := cert.NebulaCertificate{
|
||||||
|
Details: cert.NebulaCertificateDetails{
|
||||||
|
Name: "host1",
|
||||||
|
Ips: []*net.IPNet{&ipNet},
|
||||||
|
Groups: []string{"default-group"},
|
||||||
|
InvertedGroups: map[string]struct{}{"default-group": {}},
|
||||||
|
Issuer: "signer-shasum",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
h := HostInfo{
|
h := HostInfo{
|
||||||
ConnectionState: &ConnectionState{
|
ConnectionState: &ConnectionState{
|
||||||
peerCert: &cert.CachedCertificate{
|
peerCert: &c,
|
||||||
Certificate: &c,
|
|
||||||
InvertedGroups: map[string]struct{}{"default-group": {}},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
vpnAddrs: []netip.Addr{netip.MustParseAddr("1.2.3.4")},
|
vpnIp: netip.MustParseAddr("1.2.3.4"),
|
||||||
}
|
}
|
||||||
h.buildNetworks(c.networks, c.unsafeNetworks)
|
h.CreateRemoteCIDR(&c)
|
||||||
|
|
||||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||||
cp := cert.NewCAPool()
|
cp := cert.NewCAPool()
|
||||||
|
|
||||||
// Drop outbound
|
// Drop outbound
|
||||||
assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
|
assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
|
||||||
// Allow inbound
|
// Allow inbound
|
||||||
resetConntrack(fw)
|
resetConntrack(fw)
|
||||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||||
// Allow outbound because conntrack
|
// Allow outbound because conntrack
|
||||||
require.NoError(t, fw.Drop(p, false, &h, cp, nil))
|
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
|
||||||
|
|
||||||
// test remote mismatch
|
// test remote mismatch
|
||||||
oldRemote := p.RemoteAddr
|
oldRemote := p.RemoteIP
|
||||||
p.RemoteAddr = netip.MustParseAddr("1.2.3.10")
|
p.RemoteIP = netip.MustParseAddr("1.2.3.10")
|
||||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
|
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
|
||||||
p.RemoteAddr = oldRemote
|
p.RemoteIP = oldRemote
|
||||||
|
|
||||||
// ensure signer doesn't get in the way of group checks
|
// ensure signer doesn't get in the way of group checks
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
|
||||||
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
|
||||||
// test caSha doesn't drop on match
|
// test caSha doesn't drop on match
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
|
||||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||||
|
|
||||||
// ensure ca name doesn't get in the way of group checks
|
// ensure ca name doesn't get in the way of group checks
|
||||||
cp.CAs["signer-shasum"] = &cert.CachedCertificate{Certificate: &dummyCert{name: "ca-good"}}
|
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
|
||||||
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||||
|
|
||||||
// test caName doesn't drop on match
|
// test caName doesn't drop on match
|
||||||
cp.CAs["signer-shasum"] = &cert.CachedCertificate{Certificate: &dummyCert{name: "ca-good"}}
|
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
|
||||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
|
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
|
||||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkFirewallTable_match(b *testing.B) {
|
func BenchmarkFirewallTable_match(b *testing.B) {
|
||||||
@ -212,9 +217,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
|
|
||||||
b.Run("fail on proto", func(b *testing.B) {
|
b.Run("fail on proto", func(b *testing.B) {
|
||||||
// This benchmark is showing us the cost of failing to match the protocol
|
// This benchmark is showing us the cost of failing to match the protocol
|
||||||
c := &cert.CachedCertificate{
|
c := &cert.NebulaCertificate{}
|
||||||
Certificate: &dummyCert{},
|
|
||||||
}
|
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp))
|
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp))
|
||||||
}
|
}
|
||||||
@ -222,31 +225,28 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
|
|
||||||
b.Run("pass proto, fail on port", func(b *testing.B) {
|
b.Run("pass proto, fail on port", func(b *testing.B) {
|
||||||
// This benchmark is showing us the cost of matching a specific protocol but failing to match the port
|
// This benchmark is showing us the cost of matching a specific protocol but failing to match the port
|
||||||
c := &cert.CachedCertificate{
|
c := &cert.NebulaCertificate{}
|
||||||
Certificate: &dummyCert{},
|
|
||||||
}
|
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp))
|
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass proto, port, fail on local CIDR", func(b *testing.B) {
|
b.Run("pass proto, port, fail on local CIDR", func(b *testing.B) {
|
||||||
c := &cert.CachedCertificate{
|
c := &cert.NebulaCertificate{}
|
||||||
Certificate: &dummyCert{},
|
|
||||||
}
|
|
||||||
ip := netip.MustParsePrefix("9.254.254.254/32")
|
ip := netip.MustParsePrefix("9.254.254.254/32")
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: ip.Addr()}, true, c, cp))
|
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip.Addr()}, true, c, cp))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
|
b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
|
||||||
c := &cert.CachedCertificate{
|
_, ip, _ := net.ParseCIDR("9.254.254.254/32")
|
||||||
Certificate: &dummyCert{
|
c := &cert.NebulaCertificate{
|
||||||
name: "nope",
|
Details: cert.NebulaCertificateDetails{
|
||||||
networks: []netip.Prefix{netip.MustParsePrefix("9.254.254.245/32")},
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
Name: "nope",
|
||||||
|
Ips: []*net.IPNet{ip},
|
||||||
},
|
},
|
||||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
|
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
|
||||||
@ -254,24 +254,25 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
|
b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
|
||||||
c := &cert.CachedCertificate{
|
_, ip, _ := net.ParseCIDR("9.254.254.254/32")
|
||||||
Certificate: &dummyCert{
|
c := &cert.NebulaCertificate{
|
||||||
name: "nope",
|
Details: cert.NebulaCertificateDetails{
|
||||||
networks: []netip.Prefix{netip.MustParsePrefix("9.254.254.245/32")},
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
Name: "nope",
|
||||||
|
Ips: []*net.IPNet{ip},
|
||||||
},
|
},
|
||||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix.Addr()}, true, c, cp))
|
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass on group on any local cidr", func(b *testing.B) {
|
b.Run("pass on group on any local cidr", func(b *testing.B) {
|
||||||
c := &cert.CachedCertificate{
|
c := &cert.NebulaCertificate{
|
||||||
Certificate: &dummyCert{
|
Details: cert.NebulaCertificateDetails{
|
||||||
name: "nope",
|
InvertedGroups: map[string]struct{}{"good-group": {}},
|
||||||
|
Name: "nope",
|
||||||
},
|
},
|
||||||
InvertedGroups: map[string]struct{}{"good-group": {}},
|
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
|
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
|
||||||
@ -279,28 +280,82 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass on group on specific local cidr", func(b *testing.B) {
|
b.Run("pass on group on specific local cidr", func(b *testing.B) {
|
||||||
c := &cert.CachedCertificate{
|
c := &cert.NebulaCertificate{
|
||||||
Certificate: &dummyCert{
|
Details: cert.NebulaCertificateDetails{
|
||||||
name: "nope",
|
InvertedGroups: map[string]struct{}{"good-group": {}},
|
||||||
|
Name: "nope",
|
||||||
},
|
},
|
||||||
InvertedGroups: map[string]struct{}{"good-group": {}},
|
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix.Addr()}, true, c, cp))
|
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Run("pass on name", func(b *testing.B) {
|
b.Run("pass on name", func(b *testing.B) {
|
||||||
c := &cert.CachedCertificate{
|
c := &cert.NebulaCertificate{
|
||||||
Certificate: &dummyCert{
|
Details: cert.NebulaCertificateDetails{
|
||||||
name: "good-host",
|
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
Name: "good-host",
|
||||||
},
|
},
|
||||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
|
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
//
|
||||||
|
//b.Run("pass on ip", func(b *testing.B) {
|
||||||
|
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
|
// c := &cert.NebulaCertificate{
|
||||||
|
// Details: cert.NebulaCertificateDetails{
|
||||||
|
// InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
// Name: "good-host",
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
// for n := 0; n < b.N; n++ {
|
||||||
|
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
|
||||||
|
// }
|
||||||
|
//})
|
||||||
|
//
|
||||||
|
//b.Run("pass on local ip", func(b *testing.B) {
|
||||||
|
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
|
// c := &cert.NebulaCertificate{
|
||||||
|
// Details: cert.NebulaCertificateDetails{
|
||||||
|
// InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
// Name: "good-host",
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
// for n := 0; n < b.N; n++ {
|
||||||
|
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
|
||||||
|
// }
|
||||||
|
//})
|
||||||
|
//
|
||||||
|
//_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
|
||||||
|
//
|
||||||
|
//b.Run("pass on ip with any port", func(b *testing.B) {
|
||||||
|
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
|
// c := &cert.NebulaCertificate{
|
||||||
|
// Details: cert.NebulaCertificateDetails{
|
||||||
|
// InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
// Name: "good-host",
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
// for n := 0; n < b.N; n++ {
|
||||||
|
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
|
||||||
|
// }
|
||||||
|
//})
|
||||||
|
//
|
||||||
|
//b.Run("pass on local ip with any port", func(b *testing.B) {
|
||||||
|
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
|
||||||
|
// c := &cert.NebulaCertificate{
|
||||||
|
// Details: cert.NebulaCertificateDetails{
|
||||||
|
// InvertedGroups: map[string]struct{}{"nope": {}},
|
||||||
|
// Name: "good-host",
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
// for n := 0; n < b.N; n++ {
|
||||||
|
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
|
||||||
|
// }
|
||||||
|
//})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_Drop2(t *testing.T) {
@ -309,55 +364,57 @@ func TestFirewall_Drop2(t *testing.T) {
l.SetOutput(ob)

p := firewall.Packet{
LocalAddr: netip.MustParseAddr("1.2.3.4"),
LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteAddr: netip.MustParseAddr("1.2.3.4"),
RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 10,
RemotePort: 90,
Protocol: firewall.ProtoUDP,
Fragment: false,
}

network := netip.MustParsePrefix("1.2.3.4/24")
ipNet := net.IPNet{
IP: net.IPv4(1, 2, 3, 4),
Mask: net.IPMask{255, 255, 255, 0},
}

c := cert.CachedCertificate{
c := cert.NebulaCertificate{
Certificate: &dummyCert{
Details: cert.NebulaCertificateDetails{
name: "host1",
Name: "host1",
networks: []netip.Prefix{network},
Ips: []*net.IPNet{&ipNet},
InvertedGroups: map[string]struct{}{"default-group": {}, "test-group": {}},
},
InvertedGroups: map[string]struct{}{"default-group": {}, "test-group": {}},
}
h := HostInfo{
ConnectionState: &ConnectionState{
peerCert: &c,
},
vpnAddrs: []netip.Addr{network.Addr()},
vpnIp: netip.MustParseAddr(ipNet.IP.String()),
}
h.buildNetworks(c.Certificate.Networks(), c.Certificate.UnsafeNetworks())
h.CreateRemoteCIDR(&c)

c1 := cert.CachedCertificate{
c1 := cert.NebulaCertificate{
Certificate: &dummyCert{
Details: cert.NebulaCertificateDetails{
name: "host1",
Name: "host1",
networks: []netip.Prefix{network},
Ips: []*net.IPNet{&ipNet},
InvertedGroups: map[string]struct{}{"default-group": {}, "test-group-not": {}},
},
InvertedGroups: map[string]struct{}{"default-group": {}, "test-group-not": {}},
}
h1 := HostInfo{
vpnAddrs: []netip.Addr{network.Addr()},
ConnectionState: &ConnectionState{
peerCert: &c1,
},
}
h1.buildNetworks(c1.Certificate.Networks(), c1.Certificate.UnsafeNetworks())
h1.CreateRemoteCIDR(&c1)

fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
cp := cert.NewCAPool()

// h1/c1 lacks the proper groups
require.ErrorIs(t, fw.Drop(p, true, &h1, cp, nil), ErrNoMatchingRule)
assert.Error(t, fw.Drop(p, true, &h1, cp, nil), ErrNoMatchingRule)
// c has the proper groups
resetConntrack(fw)
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
}

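TestFirewall_Drop2 above shows that a rule listing several groups requires the peer certificate to carry all of them: h1, which lacks "test-group", is rejected with ErrNoMatchingRule while h is allowed. A small sketch of that check, assuming the fw, packet p, host info h1, and CA pool cp from the test:

// Sketch: both listed groups must be present on the peer certificate for this rule to match.
if err := fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""); err != nil {
	panic(err) // the test uses require.NoError here
}
if err := fw.Drop(p, true, &h1, cp, nil); err != nil {
	// h1 is missing "test-group", so err is ErrNoMatchingRule and the packet is dropped
}
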
func TestFirewall_Drop3(t *testing.T) {
@ -366,85 +423,84 @@ func TestFirewall_Drop3(t *testing.T) {
l.SetOutput(ob)

p := firewall.Packet{
LocalAddr: netip.MustParseAddr("1.2.3.4"),
LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteAddr: netip.MustParseAddr("1.2.3.4"),
RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 1,
RemotePort: 1,
Protocol: firewall.ProtoUDP,
Fragment: false,
}

network := netip.MustParsePrefix("1.2.3.4/24")
ipNet := net.IPNet{
c := cert.CachedCertificate{
IP: net.IPv4(1, 2, 3, 4),
Certificate: &dummyCert{
Mask: net.IPMask{255, 255, 255, 0},
name: "host-owner",
}
networks: []netip.Prefix{network},

c := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "host-owner",
Ips: []*net.IPNet{&ipNet},
},
}

c1 := cert.CachedCertificate{
c1 := cert.NebulaCertificate{
Certificate: &dummyCert{
Details: cert.NebulaCertificateDetails{
name: "host1",
Name: "host1",
networks: []netip.Prefix{network},
Ips: []*net.IPNet{&ipNet},
issuer: "signer-sha-bad",
Issuer: "signer-sha-bad",
},
}
h1 := HostInfo{
ConnectionState: &ConnectionState{
peerCert: &c1,
},
vpnAddrs: []netip.Addr{network.Addr()},
vpnIp: netip.MustParseAddr(ipNet.IP.String()),
}
h1.buildNetworks(c1.Certificate.Networks(), c1.Certificate.UnsafeNetworks())
h1.CreateRemoteCIDR(&c1)

c2 := cert.CachedCertificate{
c2 := cert.NebulaCertificate{
Certificate: &dummyCert{
Details: cert.NebulaCertificateDetails{
name: "host2",
Name: "host2",
networks: []netip.Prefix{network},
Ips: []*net.IPNet{&ipNet},
issuer: "signer-sha",
Issuer: "signer-sha",
},
}
h2 := HostInfo{
ConnectionState: &ConnectionState{
peerCert: &c2,
},
vpnAddrs: []netip.Addr{network.Addr()},
vpnIp: netip.MustParseAddr(ipNet.IP.String()),
}
h2.buildNetworks(c2.Certificate.Networks(), c2.Certificate.UnsafeNetworks())
h2.CreateRemoteCIDR(&c2)

c3 := cert.CachedCertificate{
c3 := cert.NebulaCertificate{
Certificate: &dummyCert{
Details: cert.NebulaCertificateDetails{
name: "host3",
Name: "host3",
networks: []netip.Prefix{network},
Ips: []*net.IPNet{&ipNet},
issuer: "signer-sha-bad",
Issuer: "signer-sha-bad",
},
}
h3 := HostInfo{
ConnectionState: &ConnectionState{
peerCert: &c3,
},
vpnAddrs: []netip.Addr{network.Addr()},
vpnIp: netip.MustParseAddr(ipNet.IP.String()),
}
h3.buildNetworks(c3.Certificate.Networks(), c3.Certificate.UnsafeNetworks())
h3.CreateRemoteCIDR(&c3)

fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", netip.Prefix{}, netip.Prefix{}, "", ""))
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-sha"))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-sha"))
cp := cert.NewCAPool()

// c1 should pass because host match
require.NoError(t, fw.Drop(p, true, &h1, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h1, cp, nil))
// c2 should pass because ca sha match
resetConntrack(fw)
require.NoError(t, fw.Drop(p, true, &h2, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h2, cp, nil))
// c3 should fail because no match
resetConntrack(fw)
assert.Equal(t, fw.Drop(p, true, &h3, cp, nil), ErrNoMatchingRule)

// Test a remote address match
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.MustParsePrefix("1.2.3.4/24"), netip.Prefix{}, "", ""))
require.NoError(t, fw.Drop(p, true, &h1, cp, nil))
}

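TestFirewall_Drop3 covers three selectors: a host-name rule ("host1"), a CA fingerprint rule ("signer-sha"), and, in the newly added block, a remote-address rule built from a prefix. A sketch of the remote-address variant, assuming the fw, p, h1, and cp values from the test; the prefix argument position mirrors the call in the test above:

// Sketch: the prefix passed where the test passes netip.MustParsePrefix("1.2.3.4/24")
// is the remote (peer) cidr constraint; with the packet's remote address inside
// 1.2.3.0/24, the Drop call below is expected to return nil.
err := fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.MustParsePrefix("1.2.3.4/24"), netip.Prefix{}, "", "")
if err == nil {
	err = fw.Drop(p, true, &h1, cp, nil)
}
_ = err
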
func TestFirewall_DropConntrackReload(t *testing.T) {
@ -453,56 +509,60 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
l.SetOutput(ob)

p := firewall.Packet{
LocalAddr: netip.MustParseAddr("1.2.3.4"),
LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteAddr: netip.MustParseAddr("1.2.3.4"),
RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 10,
RemotePort: 90,
Protocol: firewall.ProtoUDP,
Fragment: false,
}
network := netip.MustParsePrefix("1.2.3.4/24")

c := cert.CachedCertificate{
ipNet := net.IPNet{
Certificate: &dummyCert{
IP: net.IPv4(1, 2, 3, 4),
name: "host1",
Mask: net.IPMask{255, 255, 255, 0},
networks: []netip.Prefix{network},
}
groups: []string{"default-group"},
issuer: "signer-shasum",
c := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "host1",
Ips: []*net.IPNet{&ipNet},
Groups: []string{"default-group"},
InvertedGroups: map[string]struct{}{"default-group": {}},
Issuer: "signer-shasum",
},
InvertedGroups: map[string]struct{}{"default-group": {}},
}
h := HostInfo{
ConnectionState: &ConnectionState{
peerCert: &c,
},
vpnAddrs: []netip.Addr{network.Addr()},
vpnIp: netip.MustParseAddr(ipNet.IP.String()),
}
h.buildNetworks(c.Certificate.Networks(), c.Certificate.UnsafeNetworks())
h.CreateRemoteCIDR(&c)

fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
cp := cert.NewCAPool()

// Drop outbound
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
// Allow inbound
resetConntrack(fw)
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
// Allow outbound because conntrack
require.NoError(t, fw.Drop(p, false, &h, cp, nil))
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))

oldFw := fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1

// Allow outbound because conntrack and new rules allow port 10
require.NoError(t, fw.Drop(p, false, &h, cp, nil))
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))

oldFw = fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1

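The reload portion of the test above preserves established flows across a rule change by carrying the connection-tracking table over to the replacement Firewall and bumping rulesVersion, so tracked entries are re-checked against the new rule set. A sketch of that handover, taken directly from the calls in the test and assuming oldFw and a freshly built fw:

// Sketch: keep established flows while swapping in new rules, as the test does.
fw.Conntrack = oldFw.Conntrack           // carry over tracked connections
fw.rulesVersion = oldFw.rulesVersion + 1 // force re-evaluation against the new rules
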
@ -581,105 +641,104 @@ func BenchmarkLookup(b *testing.B) {
ml(m, a)
}
})

//TODO: only way array lookup in array will help is if both are sorted, then maybe it's faster
}

func Test_parsePort(t *testing.T) {
_, _, err := parsePort("")
require.EqualError(t, err, "was not a number; ``")
assert.EqualError(t, err, "was not a number; ``")

_, _, err = parsePort(" ")
require.EqualError(t, err, "was not a number; ` `")
assert.EqualError(t, err, "was not a number; ` `")

_, _, err = parsePort("-")
require.EqualError(t, err, "appears to be a range but could not be parsed; `-`")
assert.EqualError(t, err, "appears to be a range but could not be parsed; `-`")

_, _, err = parsePort(" - ")
require.EqualError(t, err, "appears to be a range but could not be parsed; ` - `")
assert.EqualError(t, err, "appears to be a range but could not be parsed; ` - `")

_, _, err = parsePort("a-b")
require.EqualError(t, err, "beginning range was not a number; `a`")
assert.EqualError(t, err, "beginning range was not a number; `a`")

_, _, err = parsePort("1-b")
require.EqualError(t, err, "ending range was not a number; `b`")
assert.EqualError(t, err, "ending range was not a number; `b`")

s, e, err := parsePort(" 1 - 2 ")
assert.Equal(t, int32(1), s)
assert.Equal(t, int32(2), e)
require.NoError(t, err)
assert.Nil(t, err)

s, e, err = parsePort("0-1")
assert.Equal(t, int32(0), s)
assert.Equal(t, int32(0), e)
require.NoError(t, err)
assert.Nil(t, err)

s, e, err = parsePort("9919")
assert.Equal(t, int32(9919), s)
assert.Equal(t, int32(9919), e)
require.NoError(t, err)
assert.Nil(t, err)

s, e, err = parsePort("any")
assert.Equal(t, int32(0), s)
assert.Equal(t, int32(0), e)
require.NoError(t, err)
assert.Nil(t, err)
}

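parsePort, as exercised above, accepts a single port, a `start-end` range with optional whitespace, or the keyword `any`, and returns an int32 pair; both `any` and the range `0-1` collapse to (0, 0). A small usage sketch of the behavior asserted in the test, assuming the same package:

// Sketch of the parsing behavior asserted in Test_parsePort above.
s, e, err := parsePort(" 1 - 2 ") // s == 1, e == 2, err == nil
_, _, _ = s, e, err
s, e, _ = parsePort("any") // s == 0, e == 0, same as the range "0-1"
_, _ = s, e
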
func TestNewFirewallFromConfig(t *testing.T) {
l := test.NewLogger()
// Test a bad rule definition
c := &dummyCert{}
c := &cert.NebulaCertificate{}
cs, err := newCertState(cert.Version2, nil, c, false, cert.Curve_CURVE25519, nil)
require.NoError(t, err)

conf := config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": "asdf"}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err := NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")
assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")

// Test both port and code
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "code": "2"}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")
assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")

// Test missing host, group, cidr, ca_name and ca_sha
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided")
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided")

// Test code/port error
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "a", "host": "testh"}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")
assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")

conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "a", "host": "testh"}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")
assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")

// Test proto error
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "1", "host": "testh"}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")
assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")

// Test cidr parse error
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "1", "cidr": "testh", "proto": "any"}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")

// Test local_cidr parse error
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "1", "local_cidr": "testh", "proto": "any"}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")

// Test both group and groups
conf = config.NewC(l)
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
_, err = NewFirewallFromConfig(l, cs, conf)
_, err = NewFirewallFromConfig(l, c, conf)
require.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
}

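The cases above feed firewall rules to NewFirewallFromConfig as nested maps under conf.Settings; each rule must carry a port or a code, a proto, and at least one of host, group/groups, cidr, local_cidr, ca_name, or ca_sha. A minimal well-formed sketch in the same shape the tests use, assuming the l and cs values from the surrounding test:

// Sketch: one outbound rule allowing TCP port 1 to the host named "a",
// mirroring the map shape the tests pass to NewFirewallFromConfig.
conf := config.NewC(l)
conf.Settings["firewall"] = map[string]any{
	"outbound": []any{
		map[string]any{"port": "1", "proto": "tcp", "host": "a"},
	},
}
fw, err := NewFirewallFromConfig(l, cs, conf)
_, _ = fw, err
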
func TestAddFirewallRulesFromConfig(t *testing.T) {
|
func TestAddFirewallRulesFromConfig(t *testing.T) {
|
||||||
@ -687,87 +746,87 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
|
|||||||
// Test adding tcp rule
|
// Test adding tcp rule
|
||||||
conf := config.NewC(l)
|
conf := config.NewC(l)
|
||||||
mf := &mockFirewall{}
|
mf := &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "proto": "tcp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding udp rule
|
// Test adding udp rule
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "proto": "udp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding icmp rule
|
// Test adding icmp rule
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "proto": "icmp", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding any rule
|
// Test adding any rule
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with cidr
|
// Test adding rule with cidr
|
||||||
cidr := netip.MustParsePrefix("10.0.0.0/8")
|
cidr := netip.MustParsePrefix("10.0.0.0/8")
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "cidr": cidr.String()}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with local_cidr
|
// Test adding rule with local_cidr
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with ca_sha
|
// Test adding rule with ca_sha
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caSha: "12312313123"}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caSha: "12312313123"}, mf.lastCall)
|
||||||
|
|
||||||
// Test adding rule with ca_name
|
// Test adding rule with ca_name
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caName: "root01"}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caName: "root01"}, mf.lastCall)
|
||||||
|
|
||||||
// Test single group
|
// Test single group
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "group": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test single groups
|
// Test single groups
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "groups": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test multiple AND groups
|
// Test multiple AND groups
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
||||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||||
|
|
||||||
// Test Add error
|
// Test Add error
|
||||||
conf = config.NewC(l)
|
conf = config.NewC(l)
|
||||||
mf = &mockFirewall{}
|
mf = &mockFirewall{}
|
||||||
mf.nextCallReturn = errors.New("test error")
|
mf.nextCallReturn = errors.New("test error")
|
||||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "host": "a"}}}
|
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||||
require.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
|
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFirewall_convertRule(t *testing.T) {
@ -776,33 +835,33 @@ func TestFirewall_convertRule(t *testing.T) {
l.SetOutput(ob)

// Ensure group array of 1 is converted and a warning is printed
c := map[string]any{
c := map[interface{}]interface{}{
"group": []any{"group1"},
"group": []interface{}{"group1"},
}

r, err := convertRule(l, c, "test", 1)
assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value")
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, "group1", r.Group)

// Ensure group array of > 1 is errord
ob.Reset()
c = map[string]any{
c = map[interface{}]interface{}{
"group": []any{"group1", "group2"},
"group": []interface{}{"group1", "group2"},
}

r, err = convertRule(l, c, "test", 1)
assert.Empty(t, ob.String())
assert.Equal(t, "", ob.String())
require.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided")

// Make sure a well formed group is alright
ob.Reset()
c = map[string]any{
c = map[interface{}]interface{}{
"group": "group1",
}

r, err = convertRule(l, c, "test", 1)
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, "group1", r.Group)
}

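convertRule, tested above, tolerates a single-element array for the group key (it logs a warning and collapses the array to a scalar) but rejects arrays with more than one entry; multi-group rules belong under groups instead. A sketch of the accepted shape, assuming the logger l from the test:

// Sketch: a one-element group array is collapsed to "group1" with a warning;
// []any{"group1", "group2"} here would return an error instead.
r, err := convertRule(l, map[string]any{"group": []any{"group1"}}, "test", 1)
_, _ = r, err
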
50
go.mod
@ -1,55 +1,55 @@
module github.com/slackhq/nebula

go 1.23.0
go 1.22.0

toolchain go1.24.1
toolchain go1.22.2

require (
dario.cat/mergo v1.0.2
dario.cat/mergo v1.0.0
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.1.0
github.com/gaissmai/bart v0.20.4
github.com/gaissmai/bart v0.11.1
github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.65
github.com/miekg/dns v1.1.61
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_golang v1.19.1
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/stretchr/testify v1.10.0
github.com/stretchr/testify v1.9.0
github.com/vishvananda/netlink v1.3.1
github.com/vishvananda/netlink v1.2.1-beta.2
golang.org/x/crypto v0.37.0
golang.org/x/crypto v0.26.0
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/net v0.39.0
golang.org/x/net v0.28.0
golang.org/x/sync v0.13.0
golang.org/x/sync v0.8.0
golang.org/x/sys v0.32.0
golang.org/x/sys v0.24.0
golang.org/x/term v0.31.0
golang.org/x/term v0.23.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/protobuf v1.36.6
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
gopkg.in/yaml.v2 v2.4.0
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
)

require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/vishvananda/netns v0.0.5 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/tools v0.22.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
101
go.sum
@ -1,6 +1,6 @@
|
|||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
@ -14,9 +14,11 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
|||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
|
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||||
|
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
|
||||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@ -24,8 +26,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
|||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||||
github.com/gaissmai/bart v0.20.4 h1:Ik47r1fy3jRVU+1eYzKSW3ho2UgBVTVnUS8O993584U=
|
github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc=
|
||||||
github.com/gaissmai/bart v0.20.4/go.mod h1:cEed+ge8dalcbpi8wtS9x9m2hn/fNJH5suhdGQOHnYk=
|
github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
@ -53,8 +55,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
|||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||||
@ -68,8 +70,6 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@ -80,19 +80,13 @@ github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3x
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
+github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
-github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
+github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
-github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b h1:J/AzCvg5z0Hn1rqZUJjpbzALUmkKX0Zwbc/i4fw7Sfk=
-github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg=
@ -106,24 +100,24 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@ -135,20 +129,21 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
+github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
+github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
+github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
-github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
+github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -156,16 +151,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -176,8 +171,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -185,30 +180,30 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
-golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
|
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||||
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
|
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -239,8 +234,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
|||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
@ -251,6 +246,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
449 handshake_ix.go
@ -2,12 +2,10 @@ package nebula

import (
    "net/netip"
-    "slices"
    "time"

    "github.com/flynn/noise"
    "github.com/sirupsen/logrus"
-    "github.com/slackhq/nebula/cert"
    "github.com/slackhq/nebula/header"
)

@ -18,61 +16,30 @@ import (
func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
    err := f.handshakeManager.allocateIndex(hh)
    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
+        f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
        return false
    }

-    // If we're connecting to a v6 address we must use a v2 cert
-    cs := f.pki.getCertState()
-    v := cs.initiatingVersion
-    for _, a := range hh.hostinfo.vpnAddrs {
-        if a.Is6() {
-            v = cert.Version2
-            break
-        }
-    }
-
-    crt := cs.getCertificate(v)
-    if crt == nil {
-        f.l.WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
-            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
-            WithField("certVersion", v).
-            Error("Unable to handshake with host because no certificate is available")
-        return false
-    }
-
-    crtHs := cs.getHandshakeBytes(v)
-    if crtHs == nil {
-        f.l.WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
-            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
-            WithField("certVersion", v).
-            Error("Unable to handshake with host because no certificate handshake bytes is available")
-    }
-
-    ci, err := NewConnectionState(f.l, cs, crt, true, noise.HandshakeIX)
-    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
-            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
-            WithField("certVersion", v).
-            Error("Failed to create connection state")
-        return false
-    }
+    certState := f.pki.GetCertState()
+    ci := NewConnectionState(f.l, f.cipher, certState, true, noise.HandshakeIX, []byte{}, 0)
    hh.hostinfo.ConnectionState = ci

-    hs := &NebulaHandshake{
-        Details: &NebulaHandshakeDetails{
-            InitiatorIndex: hh.hostinfo.localIndexId,
-            Time: uint64(time.Now().UnixNano()),
-            Cert: crtHs,
-            CertVersion: uint32(v),
-        },
+    hsProto := &NebulaHandshakeDetails{
+        InitiatorIndex: hh.hostinfo.localIndexId,
+        Time: uint64(time.Now().UnixNano()),
+        Cert: certState.RawCertificateNoKey,
    }

-    hsBytes, err := hs.Marshal()
+    hsBytes := []byte{}
+
+    hs := &NebulaHandshake{
+        Details: hsProto,
+    }
+    hsBytes, err = hs.Marshal()
+
    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
-            WithField("certVersion", v).
+        f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
        return false
    }
@ -81,7 +48,7 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {

    msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
+        f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
        return false
    }
@ -96,145 +63,80 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
}

func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
-    cs := f.pki.getCertState()
-    crt := cs.GetDefaultCertificate()
-    if crt == nil {
-        f.l.WithField("udpAddr", addr).
-            WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
-            WithField("certVersion", cs.initiatingVersion).
-            Error("Unable to handshake with host because no certificate is available")
-    }
-
-    ci, err := NewConnectionState(f.l, cs, crt, false, noise.HandshakeIX)
-    if err != nil {
-        f.l.WithError(err).WithField("udpAddr", addr).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            Error("Failed to create connection state")
-        return
-    }
+    certState := f.pki.GetCertState()
+    ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)

    // Mark packet 1 as seen so it doesn't show up as missed
    ci.window.Update(f.l, 1)

    msg, _, _, err := ci.H.ReadMessage(nil, packet[header.Len:])
    if err != nil {
        f.l.WithError(err).WithField("udpAddr", addr).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            Error("Failed to call noise.ReadMessage")
+            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage")
        return
    }

    hs := &NebulaHandshake{}
    err = hs.Unmarshal(msg)
+    /*
+        l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex)
+    */
    if err != nil || hs.Details == nil {
        f.l.WithError(err).WithField("udpAddr", addr).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            Error("Failed unmarshal handshake message")
+            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
        return
    }

-    rc, err := cert.Recombine(cert.Version(hs.Details.CertVersion), hs.Details.Cert, ci.H.PeerStatic(), ci.Curve())
+    remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
    if err != nil {
-        f.l.WithError(err).WithField("udpAddr", addr).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            Info("Handshake did not contain a certificate")
-        return
-    }
-
-    remoteCert, err := f.pki.GetCAPool().VerifyCertificate(time.Now(), rc)
-    if err != nil {
-        fp, err := rc.Fingerprint()
-        if err != nil {
-            fp = "<error generating certificate fingerprint>"
-        }
-
        e := f.l.WithError(err).WithField("udpAddr", addr).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            WithField("certVpnNetworks", rc.Networks()).
-            WithField("certFingerprint", fp)
+            WithField("handshake", m{"stage": 1, "style": "ix_psk0"})

-        if f.l.Level >= logrus.DebugLevel {
-            e = e.WithField("cert", rc)
+        if f.l.Level > logrus.DebugLevel {
+            e = e.WithField("cert", remoteCert)
        }

        e.Info("Invalid certificate from host")
        return
    }

-    if remoteCert.Certificate.Version() != ci.myCert.Version() {
-        // We started off using the wrong certificate version, lets see if we can match the version that was sent to us
-        rc := cs.getCertificate(remoteCert.Certificate.Version())
-        if rc == nil {
-            f.l.WithError(err).WithField("udpAddr", addr).
-                WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
-                Info("Unable to handshake with host due to missing certificate version")
-            return
+    vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
+    if !ok {
+        e := f.l.WithError(err).WithField("udpAddr", addr).
+            WithField("handshake", m{"stage": 1, "style": "ix_psk0"})
+
+        if f.l.Level > logrus.DebugLevel {
+            e = e.WithField("cert", remoteCert)
        }

-        // Record the certificate we are actually using
-        ci.myCert = rc
-    }
-
-    if len(remoteCert.Certificate.Networks()) == 0 {
-        f.l.WithError(err).WithField("udpAddr", addr).
-            WithField("cert", remoteCert).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            Info("No networks in certificate")
+        e.Info("Invalid vpn ip from host")
        return
    }

-    var vpnAddrs []netip.Addr
-    var filteredNetworks []netip.Prefix
-    certName := remoteCert.Certificate.Name()
-    certVersion := remoteCert.Certificate.Version()
-    fingerprint := remoteCert.Fingerprint
-    issuer := remoteCert.Certificate.Issuer()
+    vpnIp = vpnIp.Unmap()
+    certName := remoteCert.Details.Name
+    fingerprint, _ := remoteCert.Sha256Sum()
+    issuer := remoteCert.Details.Issuer

-    for _, network := range remoteCert.Certificate.Networks() {
-        vpnAddr := network.Addr()
-        if f.myVpnAddrsTable.Contains(vpnAddr) {
-            f.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", addr).
-                WithField("certName", certName).
-                WithField("certVersion", certVersion).
-                WithField("fingerprint", fingerprint).
-                WithField("issuer", issuer).
-                WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself")
-            return
-        }
-
-        // vpnAddrs outside our vpn networks are of no use to us, filter them out
-        if !f.myVpnNetworksTable.Contains(vpnAddr) {
-            continue
-        }
-
-        filteredNetworks = append(filteredNetworks, network)
-        vpnAddrs = append(vpnAddrs, vpnAddr)
-    }
-
-    if len(vpnAddrs) == 0 {
-        f.l.WithError(err).WithField("udpAddr", addr).
+    if vpnIp == f.myVpnNet.Addr() {
+        f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
            WithField("certName", certName).
-            WithField("certVersion", certVersion).
            WithField("fingerprint", fingerprint).
            WithField("issuer", issuer).
-            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("No usable vpn addresses from host, refusing handshake")
+            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself")
        return
    }

    if addr.IsValid() {
-        // addr can be invalid when the tunnel is being relayed.
-        // We only want to apply the remote allow list for direct tunnels here
-        if !f.lightHouse.GetRemoteAllowList().AllowAll(vpnAddrs, addr.Addr()) {
-            f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
+        if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.Addr()) {
+            f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
            return
        }
    }

    myIndex, err := generateIndex(f.l)
    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
+        f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
            WithField("certName", certName).
-            WithField("certVersion", certVersion).
            WithField("fingerprint", fingerprint).
            WithField("issuer", issuer).
            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
@ -245,19 +147,18 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
        ConnectionState: ci,
        localIndexId: myIndex,
        remoteIndexId: hs.Details.InitiatorIndex,
-        vpnAddrs: vpnAddrs,
+        vpnIp: vpnIp,
        HandshakePacket: make(map[uint8][]byte, 0),
        lastHandshakeTime: hs.Details.Time,
        relayState: RelayState{
            relays: nil,
-            relayForByAddr: map[netip.Addr]*Relay{},
+            relayForByIp: map[netip.Addr]*Relay{},
            relayForByIdx: map[uint32]*Relay{},
        },
    }

-    f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
+    f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
        WithField("certName", certName).
-        WithField("certVersion", certVersion).
        WithField("fingerprint", fingerprint).
        WithField("issuer", issuer).
        WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
@ -265,29 +166,14 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
        Info("Handshake message received")

    hs.Details.ResponderIndex = myIndex
-    hs.Details.Cert = cs.getHandshakeBytes(ci.myCert.Version())
-    if hs.Details.Cert == nil {
-        f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
-            WithField("certName", certName).
-            WithField("certVersion", certVersion).
-            WithField("fingerprint", fingerprint).
-            WithField("issuer", issuer).
-            WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
-            WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-            WithField("certVersion", ci.myCert.Version()).
-            Error("Unable to handshake with host because no certificate handshake bytes is available")
-        return
-    }
-
-    hs.Details.CertVersion = uint32(ci.myCert.Version())
+    hs.Details.Cert = certState.RawCertificateNoKey
    // Update the time in case their clock is way off from ours
    hs.Details.Time = uint64(time.Now().UnixNano())

    hsBytes, err := hs.Marshal()
    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
+        f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
            WithField("certName", certName).
-            WithField("certVersion", certVersion).
            WithField("fingerprint", fingerprint).
            WithField("issuer", issuer).
            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
@ -297,17 +183,15 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
    nh := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, hs.Details.InitiatorIndex, 2)
    msg, dKey, eKey, err := ci.H.WriteMessage(nh, hsBytes)
    if err != nil {
-        f.l.WithError(err).WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
+        f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
            WithField("certName", certName).
-            WithField("certVersion", certVersion).
            WithField("fingerprint", fingerprint).
            WithField("issuer", issuer).
            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
        return
    } else if dKey == nil || eKey == nil {
-        f.l.WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
+        f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
            WithField("certName", certName).
-            WithField("certVersion", certVersion).
            WithField("fingerprint", fingerprint).
            WithField("issuer", issuer).
            WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Noise did not arrive at a key")
@ -330,9 +214,9 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
    ci.dKey = NewNebulaCipherState(dKey)
    ci.eKey = NewNebulaCipherState(eKey)

-    hostinfo.remotes = f.lightHouse.QueryCache(vpnAddrs)
+    hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
    hostinfo.SetRemote(addr)
-    hostinfo.buildNetworks(filteredNetworks, remoteCert.Certificate.UnsafeNetworks())
+    hostinfo.CreateRemoteCIDR(remoteCert)

    existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
    if err != nil {
@ -342,7 +226,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
            if existing.SetRemoteIfPreferred(f.hostMap, addr) {
                // Send a test packet to ensure the other side has also switched to
                // the preferred remote
-                f.SendMessageToVpnAddr(header.Test, header.TestRequest, vpnAddrs[0], []byte(""), make([]byte, 12, 12), make([]byte, mtu))
+                f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
            }

            msg = existing.HandshakePacket[2]
@ -350,11 +234,11 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
            if addr.IsValid() {
                err := f.outside.WriteTo(msg, addr)
                if err != nil {
-                    f.l.WithField("vpnAddrs", existing.vpnAddrs).WithField("udpAddr", addr).
+                    f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
                        WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
                        WithError(err).Error("Failed to send handshake message")
                } else {
-                    f.l.WithField("vpnAddrs", existing.vpnAddrs).WithField("udpAddr", addr).
+                    f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
                        WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
                        Info("Handshake message sent")
                }
@ -364,18 +248,17 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
                    f.l.Error("Handshake send failed: both addr and via are nil.")
                    return
                }
-                hostinfo.relayState.InsertRelayTo(via.relayHI.vpnAddrs[0])
+                hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
                f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
-                f.l.WithField("vpnAddrs", existing.vpnAddrs).WithField("relay", via.relayHI.vpnAddrs[0]).
+                f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via.relayHI.vpnIp).
                    WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
                    Info("Handshake message sent")
                return
            }
        case ErrExistingHostInfo:
            // This means there was an existing tunnel and this handshake was older than the one we are currently based on
-            f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
+            f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
                WithField("certName", certName).
-                WithField("certVersion", certVersion).
                WithField("oldHandshakeTime", existing.lastHandshakeTime).
                WithField("newHandshakeTime", hostinfo.lastHandshakeTime).
                WithField("fingerprint", fingerprint).
@ -385,26 +268,24 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
                Info("Handshake too old")

            // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
-            f.SendMessageToVpnAddr(header.Test, header.TestRequest, vpnAddrs[0], []byte(""), make([]byte, 12, 12), make([]byte, mtu))
+            f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
            return
        case ErrLocalIndexCollision:
            // This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry
-            f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
+            f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
                WithField("certName", certName).
-                WithField("certVersion", certVersion).
                WithField("fingerprint", fingerprint).
                WithField("issuer", issuer).
                WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
                WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-                WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnAddrs).
+                WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
                Error("Failed to add HostInfo due to localIndex collision")
            return
        default:
            // Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
            // And we forget to update it here
-            f.l.WithError(err).WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
+            f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
                WithField("certName", certName).
-                WithField("certVersion", certVersion).
                WithField("fingerprint", fingerprint).
                WithField("issuer", issuer).
                WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
@ -419,18 +300,16 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
|||||||
if addr.IsValid() {
|
if addr.IsValid() {
|
||||||
err = f.outside.WriteTo(msg, addr)
|
err = f.outside.WriteTo(msg, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
WithField("certVersion", certVersion).
|
|
||||||
WithField("fingerprint", fingerprint).
|
WithField("fingerprint", fingerprint).
|
||||||
WithField("issuer", issuer).
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||||
WithError(err).Error("Failed to send handshake")
|
WithError(err).Error("Failed to send handshake")
|
||||||
} else {
|
} else {
|
||||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
WithField("certVersion", certVersion).
|
|
||||||
WithField("fingerprint", fingerprint).
|
WithField("fingerprint", fingerprint).
|
||||||
WithField("issuer", issuer).
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
@ -442,14 +321,13 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
|||||||
f.l.Error("Handshake send failed: both addr and via are nil.")
|
f.l.Error("Handshake send failed: both addr and via are nil.")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnAddrs[0])
|
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
|
||||||
// I successfully received a handshake. Just in case I marked this tunnel as 'Disestablished', ensure
|
// I successfully received a handshake. Just in case I marked this tunnel as 'Disestablished', ensure
|
||||||
// it's correctly marked as working.
|
// it's correctly marked as working.
|
||||||
via.relayHI.relayState.UpdateRelayForByIdxState(via.remoteIdx, Established)
|
via.relayHI.relayState.UpdateRelayForByIdxState(via.remoteIdx, Established)
|
||||||
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
||||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("relay", via.relayHI.vpnAddrs[0]).
|
f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
WithField("certVersion", certVersion).
|
|
||||||
WithField("fingerprint", fingerprint).
|
WithField("fingerprint", fingerprint).
|
||||||
WithField("issuer", issuer).
|
WithField("issuer", issuer).
|
||||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||||
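Both sides of this hunk rebuild the same WithField chain at every exit path of stage 1. A minimal stand-alone sketch, using only logrus, of hoisting the shared fields into one reusable entry; buildHandshakeLogger and the literal field values are illustrative, not Nebula APIs.

package main

import "github.com/sirupsen/logrus"

// buildHandshakeLogger bundles the fields the handshake paths attach repeatedly
// into a single reusable *logrus.Entry.
func buildHandshakeLogger(l *logrus.Logger, vpnIp, udpAddr, certName string) *logrus.Entry {
	return l.WithField("vpnIp", vpnIp).
		WithField("udpAddr", udpAddr).
		WithField("certName", certName)
}

func main() {
	l := logrus.New()
	hsLog := buildHandshakeLogger(l, "192.168.100.1", "203.0.113.5:4242", "host-a")
	// Each call site only adds what is specific to it.
	hsLog.WithField("handshake", map[string]any{"stage": 1, "style": "ix_psk0"}).
		Info("Handshake message sent")
	hsLog.Error("Failed to add HostInfo due to localIndex collision")
}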
@@ -475,9 +353,8 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha

hostinfo := hh.hostinfo
if addr.IsValid() {
- // The vpnAddr we know about is the one we tried to handshake with, use it to apply the remote allow list.
- if !f.lightHouse.GetRemoteAllowList().AllowAll(hostinfo.vpnAddrs, addr.Addr()) {
- f.l.WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
+ if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.Addr()) {
+ f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return false
}
}
@@ -485,7 +362,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
ci := hostinfo.ConnectionState
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil {
- f.l.WithError(err).WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
+ f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Error("Failed to call noise.ReadMessage")

@@ -494,7 +371,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
// near future
return false
} else if dKey == nil || eKey == nil {
- f.l.WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
+ f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Noise did not arrive at a key")

@@ -506,57 +383,95 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
hs := &NebulaHandshake{}
err = hs.Unmarshal(msg)
if err != nil || hs.Details == nil {
- f.l.WithError(err).WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
+ f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")

// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true
}

- rc, err := cert.Recombine(cert.Version(hs.Details.CertVersion), hs.Details.Cert, ci.H.PeerStatic(), ci.Curve())
+ remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil {
- f.l.WithError(err).WithField("udpAddr", addr).
- WithField("vpnAddrs", hostinfo.vpnAddrs).
- WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
- Info("Handshake did not contain a certificate")
+ e := f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
+ WithField("handshake", m{"stage": 2, "style": "ix_psk0"})
+
+ if f.l.Level > logrus.DebugLevel {
+ e = e.WithField("cert", remoteCert)
+ }
+
+ e.Error("Invalid certificate from host")
+
+ // The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true
}

- remoteCert, err := f.pki.GetCAPool().VerifyCertificate(time.Now(), rc)
- if err != nil {
- fp, err := rc.Fingerprint()
- if err != nil {
- fp = "<error generating certificate fingerprint>"
- }
-
+ vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
+ if !ok {
e := f.l.WithError(err).WithField("udpAddr", addr).
- WithField("vpnAddrs", hostinfo.vpnAddrs).
- WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
- WithField("certFingerprint", fp).
- WithField("certVpnNetworks", rc.Networks())
+ WithField("handshake", m{"stage": 2, "style": "ix_psk0"})

- if f.l.Level >= logrus.DebugLevel {
+ if f.l.Level > logrus.DebugLevel {
- e = e.WithField("cert", rc)
+ e = e.WithField("cert", remoteCert)
}

- e.Info("Invalid certificate from host")
+ e.Info("Invalid vpn ip from host")
return true
}

- if len(remoteCert.Certificate.Networks()) == 0 {
- f.l.WithError(err).WithField("udpAddr", addr).
- WithField("vpnAddrs", hostinfo.vpnAddrs).
- WithField("cert", remoteCert).
+ vpnIp = vpnIp.Unmap()
+ certName := remoteCert.Details.Name
+ fingerprint, _ := remoteCert.Sha256Sum()
+ issuer := remoteCert.Details.Issuer
+
+ // Ensure the right host responded
+ if vpnIp != hostinfo.vpnIp {
+ f.l.WithField("intendedVpnIp", hostinfo.vpnIp).WithField("haveVpnIp", vpnIp).
+ WithField("udpAddr", addr).WithField("certName", certName).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
- Info("No networks in certificate")
+ Info("Incorrect host responded to handshake")
+
+ // Release our old handshake from pending, it should not continue
+ f.handshakeManager.DeleteHostInfo(hostinfo)
+
+ // Create a new hostinfo/handshake for the intended vpn ip
+ f.handshakeManager.StartHandshake(hostinfo.vpnIp, func(newHH *HandshakeHostInfo) {
+ //TODO: this doesnt know if its being added or is being used for caching a packet
+ // Block the current used address
+ newHH.hostinfo.remotes = hostinfo.remotes
+ newHH.hostinfo.remotes.BlockRemote(addr)
+
+ // Get the correct remote list for the host we did handshake with
+ hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
+
+ f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
+ WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.GetPreferredRanges())).
+ Info("Blocked addresses for handshakes")
+
+ // Swap the packet store to benefit the original intended recipient
+ newHH.packetStore = hh.packetStore
+ hh.packetStore = []*cachedPacket{}
+
+ // Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
+ hostinfo.vpnIp = vpnIp
+ f.sendCloseTunnel(hostinfo)
+ })
+
return true
}

- vpnNetworks := remoteCert.Certificate.Networks()
- certName := remoteCert.Certificate.Name()
- certVersion := remoteCert.Certificate.Version()
- fingerprint := remoteCert.Fingerprint
- issuer := remoteCert.Certificate.Issuer()
+ // Mark packet 2 as seen so it doesn't show up as missed
+ ci.window.Update(f.l, 2)
+
+ duration := time.Since(hh.startTime).Nanoseconds()
+ f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
+ WithField("certName", certName).
+ WithField("fingerprint", fingerprint).
+ WithField("issuer", issuer).
+ WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
+ WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
+ WithField("durationNs", duration).
+ WithField("sentCachedPackets", len(hh.packetStore)).
+ Info("Handshake message received")
+
hostinfo.remoteIndexId = hs.Details.ResponderIndex
hostinfo.lastHandshakeTime = hs.Details.Time
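On the removed side of the hunk above, when rc.Fingerprint() fails the code logs a fixed placeholder string instead of dropping the log line. A stand-alone sketch of that fallback using a plain SHA-256 over raw certificate bytes; the helper names and the hash choice are assumptions for illustration, not Nebula's certificate API.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
)

// fingerprintOrPlaceholder mirrors the fallback pattern: if a fingerprint
// cannot be produced, return a fixed placeholder so the log line still lands.
func fingerprintOrPlaceholder(rawCert []byte) string {
	fp, err := fingerprint(rawCert)
	if err != nil {
		return "<error generating certificate fingerprint>"
	}
	return fp
}

func fingerprint(rawCert []byte) (string, error) {
	if len(rawCert) == 0 {
		return "", errors.New("empty certificate")
	}
	sum := sha256.Sum256(rawCert)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	fmt.Println(fingerprintOrPlaceholder([]byte("example certificate bytes")))
	fmt.Println(fingerprintOrPlaceholder(nil)) // placeholder path
}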
@@ -570,87 +485,13 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
if addr.IsValid() {
hostinfo.SetRemote(addr)
} else {
- hostinfo.relayState.InsertRelayTo(via.relayHI.vpnAddrs[0])
+ hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
}

- var vpnAddrs []netip.Addr
- var filteredNetworks []netip.Prefix
- for _, network := range vpnNetworks {
- // vpnAddrs outside our vpn networks are of no use to us, filter them out
- vpnAddr := network.Addr()
- if !f.myVpnNetworksTable.Contains(vpnAddr) {
- continue
- }
-
- filteredNetworks = append(filteredNetworks, network)
- vpnAddrs = append(vpnAddrs, vpnAddr)
- }
-
- if len(vpnAddrs) == 0 {
- f.l.WithError(err).WithField("udpAddr", addr).
- WithField("certName", certName).
- WithField("certVersion", certVersion).
- WithField("fingerprint", fingerprint).
- WithField("issuer", issuer).
- WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("No usable vpn addresses from host, refusing handshake")
- return true
- }
-
- // Ensure the right host responded
- if !slices.Contains(vpnAddrs, hostinfo.vpnAddrs[0]) {
- f.l.WithField("intendedVpnAddrs", hostinfo.vpnAddrs).WithField("haveVpnNetworks", vpnNetworks).
- WithField("udpAddr", addr).
- WithField("certName", certName).
- WithField("certVersion", certVersion).
- WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
- Info("Incorrect host responded to handshake")
-
- // Release our old handshake from pending, it should not continue
- f.handshakeManager.DeleteHostInfo(hostinfo)
-
- // Create a new hostinfo/handshake for the intended vpn ip
- f.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], func(newHH *HandshakeHostInfo) {
- // Block the current used address
- newHH.hostinfo.remotes = hostinfo.remotes
- newHH.hostinfo.remotes.BlockRemote(addr)
-
- f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).
- WithField("vpnNetworks", vpnNetworks).
- WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.GetPreferredRanges())).
- Info("Blocked addresses for handshakes")
-
- // Swap the packet store to benefit the original intended recipient
- newHH.packetStore = hh.packetStore
- hh.packetStore = []*cachedPacket{}
-
- // Finally, put the correct vpn addrs in the host info, tell them to close the tunnel, and return true to tear down
- hostinfo.vpnAddrs = vpnAddrs
- f.sendCloseTunnel(hostinfo)
- })
-
- return true
- }
-
- // Mark packet 2 as seen so it doesn't show up as missed
- ci.window.Update(f.l, 2)
-
- duration := time.Since(hh.startTime).Nanoseconds()
- f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
- WithField("certName", certName).
- WithField("certVersion", certVersion).
- WithField("fingerprint", fingerprint).
- WithField("issuer", issuer).
- WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
- WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
- WithField("durationNs", duration).
- WithField("sentCachedPackets", len(hh.packetStore)).
- Info("Handshake message received")
-
// Build up the radix for the firewall if we have subnets in the cert
- hostinfo.vpnAddrs = vpnAddrs
- hostinfo.buildNetworks(filteredNetworks, remoteCert.Certificate.UnsafeNetworks())
+ hostinfo.CreateRemoteCIDR(remoteCert)

- // Complete our handshake and update metrics, this will replace any existing tunnels for the vpnAddrs here
+ // Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo)
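The removed side of this hunk filters the certificate's networks down to addresses that fall inside our own VPN networks before accepting the handshake. A stand-alone sketch of that filter with net/netip; a simple slice scan stands in for the routing table (myVpnNetworksTable) the real code uses, and the prefixes are example values.

package main

import (
	"fmt"
	"net/netip"
)

// filterToMyNetworks keeps only the certificate networks whose address lies
// inside one of our own VPN networks, returning both the usable addresses and
// the matching prefixes, as the vpnAddrs/filteredNetworks loop does.
func filterToMyNetworks(myNetworks, certNetworks []netip.Prefix) ([]netip.Addr, []netip.Prefix) {
	var vpnAddrs []netip.Addr
	var filtered []netip.Prefix
	for _, network := range certNetworks {
		addr := network.Addr()
		usable := false
		for _, mine := range myNetworks {
			if mine.Contains(addr) {
				usable = true
				break
			}
		}
		if !usable {
			continue // addresses outside our vpn networks are of no use to us
		}
		filtered = append(filtered, network)
		vpnAddrs = append(vpnAddrs, addr)
	}
	return vpnAddrs, filtered
}

func main() {
	mine := []netip.Prefix{netip.MustParsePrefix("10.1.0.0/16")}
	cert := []netip.Prefix{
		netip.MustParsePrefix("10.1.1.5/24"),
		netip.MustParsePrefix("192.168.50.2/24"), // outside our networks, dropped
	}
	addrs, nets := filterToMyNetworks(mine, cert)
	fmt.Println(addrs, nets)
}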
@@ -7,15 +7,14 @@ import (
"encoding/binary"
"errors"
"net/netip"
- "slices"
"sync"
"time"

"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
- "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/udp"
+ "golang.org/x/exp/slices"
)

const (
@@ -119,18 +118,18 @@ func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *Lig
}
}

- func (hm *HandshakeManager) Run(ctx context.Context) {
- clockSource := time.NewTicker(hm.config.tryInterval)
+ func (c *HandshakeManager) Run(ctx context.Context) {
+ clockSource := time.NewTicker(c.config.tryInterval)
defer clockSource.Stop()

for {
select {
case <-ctx.Done():
return
- case vpnIP := <-hm.trigger:
- hm.handleOutbound(vpnIP, true)
+ case vpnIP := <-c.trigger:
+ c.handleOutbound(vpnIP, true)
case now := <-clockSource.C:
- hm.NextOutboundHandshakeTimerTick(now)
+ c.NextOutboundHandshakeTimerTick(now)
}
}
}
@@ -138,7 +137,7 @@ func (hm *HandshakeManager) Run(ctx context.Context) {
func (hm *HandshakeManager) HandleIncoming(addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
// First remote allow list check before we know the vpnIp
if addr.IsValid() {
- if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnAddr(addr.Addr()) {
+ if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.Addr()) {
hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
@@ -160,14 +159,14 @@ func (hm *HandshakeManager) HandleIncoming(addr netip.AddrPort, via *ViaSender,
}
}

- func (hm *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
- hm.OutboundHandshakeTimer.Advance(now)
+ func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
+ c.OutboundHandshakeTimer.Advance(now)
for {
- vpnIp, has := hm.OutboundHandshakeTimer.Purge()
+ vpnIp, has := c.OutboundHandshakeTimer.Purge()
if !has {
break
}
- hm.handleOutbound(vpnIp, false)
+ c.handleOutbound(vpnIp, false)
}
}

@@ -209,7 +208,7 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
// NB ^ This comment doesn't jive. It's how the thing gets initialized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil {
- hostinfo.remotes = hm.lightHouse.QueryCache([]netip.Addr{vpnIp})
+ hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
}

remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())
@@ -224,7 +223,7 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered

hh.lastRemotes = remotes

- // This will generate a load of queries for hosts with only 1 ip
+ // TODO: this will generate a load of queries for hosts with only 1 ip
// (such as ones registered to the lighthouse with only a private IP)
// So we only do it one time after attempting 5 handshakes already.
if len(remotes) <= 1 && hh.counter == 5 {
@@ -257,7 +256,7 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent")
- } else if hm.l.Level >= logrus.DebugLevel {
+ } else if hm.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
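Run drives retries from two sources: a ticker for periodic attempts and a trigger channel for immediate ones, both cut off by the context. A stand-alone sketch of that select loop; the interval, callback, and addresses are invented for the example.

package main

import (
	"context"
	"fmt"
	"net/netip"
	"time"
)

// retryLoop mirrors the Run pattern: a ticker drives periodic retries while a
// trigger channel lets other code (for example a lighthouse reply) start one
// immediately.
func retryLoop(ctx context.Context, trigger <-chan netip.Addr, interval time.Duration, handle func(netip.Addr, bool)) {
	clock := time.NewTicker(interval)
	defer clock.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case vpnIP := <-trigger:
			handle(vpnIP, true) // triggered, try right away
		case <-clock.C:
			handle(netip.Addr{}, false) // timer tick, walk whatever is due
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond)
	defer cancel()
	trigger := make(chan netip.Addr, 1)
	trigger <- netip.MustParseAddr("10.1.1.2")
	retryLoop(ctx, trigger, 50*time.Millisecond, func(a netip.Addr, triggered bool) {
		fmt.Println("handle", a, "triggered:", triggered)
	})
}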
@@ -268,23 +267,17 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays {
- // Don't relay to myself
- if relay == vpnIp {
+ // Don't relay to myself, and don't relay through the host I'm trying to connect to
+ if relay == vpnIp || relay == hm.lightHouse.myVpnNet.Addr() {
continue
}
-
- // Don't relay through the host I'm trying to connect to
- if hm.f.myVpnAddrsTable.Contains(relay) {
- continue
- }
-
- relayHostInfo := hm.mainHostMap.QueryVpnAddr(relay)
+ relayHostInfo := hm.mainHostMap.QueryVpnIp(relay)
if relayHostInfo == nil || !relayHostInfo.remote.IsValid() {
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
hm.f.Handshake(relay)
continue
}
- // Check the relay HostInfo to see if we already established a relay through
+ // Check the relay HostInfo to see if we already established a relay through it
existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp)
if !ok {
// No relays exist or requested yet.
@@ -294,35 +287,16 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
}

+ //TODO: IPV6-WORK
+ myVpnIpB := hm.f.myVpnNet.Addr().As4()
+ theirVpnIpB := vpnIp.As4()
+
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: idx,
+ RelayFromIp: binary.BigEndian.Uint32(myVpnIpB[:]),
+ RelayToIp: binary.BigEndian.Uint32(theirVpnIpB[:]),
}

- switch relayHostInfo.GetCert().Certificate.Version() {
- case cert.Version1:
- if !hm.f.myVpnAddrs[0].Is4() {
- hostinfo.logger(hm.l).Error("can not establish v1 relay with a v6 network because the relay is not running a current nebula version")
- continue
- }
-
- if !vpnIp.Is4() {
- hostinfo.logger(hm.l).Error("can not establish v1 relay with a v6 remote network because the relay is not running a current nebula version")
- continue
- }
-
- b := hm.f.myVpnAddrs[0].As4()
- m.OldRelayFromAddr = binary.BigEndian.Uint32(b[:])
- b = vpnIp.As4()
- m.OldRelayToAddr = binary.BigEndian.Uint32(b[:])
- case cert.Version2:
- m.RelayFromAddr = netAddrToProtoAddr(hm.f.myVpnAddrs[0])
- m.RelayToAddr = netAddrToProtoAddr(vpnIp)
- default:
- hostinfo.logger(hm.l).Error("Unknown certificate version found while creating relay")
- continue
- }
-
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(hm.l).
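For version 1 relays, the removed side packs IPv4 VPN addresses into uint32 control-message fields via As4 and binary.BigEndian and refuses IPv6 addresses. A stand-alone sketch of that round trip; the helper names and addresses are illustrative, not Nebula's protobuf fields.

package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// addrToUint32 encodes an IPv4 address into the big-endian uint32 form the
// legacy relay fields expect; IPv6 has no such representation, so it is refused.
func addrToUint32(a netip.Addr) (uint32, bool) {
	if !a.Is4() {
		return 0, false
	}
	b := a.As4()
	return binary.BigEndian.Uint32(b[:]), true
}

// uint32ToAddr reverses the encoding.
func uint32ToAddr(v uint32) netip.Addr {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], v)
	return netip.AddrFrom4(b)
}

func main() {
	relayFrom := netip.MustParseAddr("10.1.1.1")
	v, ok := addrToUint32(relayFrom)
	fmt.Println(v, ok)
	fmt.Println(uint32ToAddr(v)) // 10.1.1.1 again

	_, ok = addrToUint32(netip.MustParseAddr("fd00::1"))
	fmt.Println(ok) // false: a v6 address cannot ride in a v1 relay field
}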
@@ -331,7 +305,7 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
} else {
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
- "relayFrom": hm.f.myVpnAddrs[0],
+ "relayFrom": hm.f.myVpnNet.Addr(),
"relayTo": vpnIp,
"initiatorRelayIndex": idx,
"relay": relay}).
@@ -340,7 +314,6 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
}
continue
}

switch existingRelay.State {
case Established:
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
@@ -352,34 +325,15 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
case Requested:
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
+ relayFrom := hm.f.myVpnNet.Addr().As4()
+ relayTo := vpnIp.As4()
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
+ RelayFromIp: binary.BigEndian.Uint32(relayFrom[:]),
+ RelayToIp: binary.BigEndian.Uint32(relayTo[:]),
}

- switch relayHostInfo.GetCert().Certificate.Version() {
- case cert.Version1:
- if !hm.f.myVpnAddrs[0].Is4() {
- hostinfo.logger(hm.l).Error("can not establish v1 relay with a v6 network because the relay is not running a current nebula version")
- continue
- }
-
- if !vpnIp.Is4() {
- hostinfo.logger(hm.l).Error("can not establish v1 relay with a v6 remote network because the relay is not running a current nebula version")
- continue
- }
-
- b := hm.f.myVpnAddrs[0].As4()
- m.OldRelayFromAddr = binary.BigEndian.Uint32(b[:])
- b = vpnIp.As4()
- m.OldRelayToAddr = binary.BigEndian.Uint32(b[:])
- case cert.Version2:
- m.RelayFromAddr = netAddrToProtoAddr(hm.f.myVpnAddrs[0])
- m.RelayToAddr = netAddrToProtoAddr(vpnIp)
- default:
- hostinfo.logger(hm.l).Error("Unknown certificate version found while creating relay")
- continue
- }
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(hm.l).
@@ -389,7 +343,7 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
// This must send over the hostinfo, not over hm.Hosts[ip]
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
- "relayFrom": hm.f.myVpnAddrs[0],
+ "relayFrom": hm.f.myVpnNet,
"relayTo": vpnIp,
"initiatorRelayIndex": existingRelay.LocalIndex,
"relay": relay}).
@@ -404,7 +358,6 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
WithField("state", existingRelay.State).
WithField("relay", relay).
Errorf("Relay unexpected state")
-
}
}
}
@@ -434,10 +387,10 @@ func (hm *HandshakeManager) GetOrHandshake(vpnIp netip.Addr, cacheCb func(*Hands
}

// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
- func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*HandshakeHostInfo)) *HostInfo {
+ func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) *HostInfo {
hm.Lock()

- if hh, ok := hm.vpnIps[vpnAddr]; ok {
+ if hh, ok := hm.vpnIps[vpnIp]; ok {
// We are already trying to handshake with this vpn ip
if cacheCb != nil {
cacheCb(hh)
@@ -447,12 +400,12 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
}

hostinfo := &HostInfo{
- vpnAddrs: []netip.Addr{vpnAddr},
+ vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: nil,
- relayForByAddr: map[netip.Addr]*Relay{},
+ relayForByIp: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}

@@ -460,9 +413,9 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
hostinfo: hostinfo,
startTime: time.Now(),
}
- hm.vpnIps[vpnAddr] = hh
+ hm.vpnIps[vpnIp] = hh
hm.metricInitiated.Inc(1)
- hm.OutboundHandshakeTimer.Add(vpnAddr, hm.config.tryInterval)
+ hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)

if cacheCb != nil {
cacheCb(hh)
@@ -470,21 +423,21 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han

// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
- _, doTrigger := hm.lightHouse.GetStaticHostList()[vpnAddr]
+ _, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
if !doTrigger {
// Add any calculated remotes, and trigger early handshake if one found
- doTrigger = hm.lightHouse.addCalculatedRemotes(vpnAddr)
+ doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
}

if doTrigger {
select {
- case hm.trigger <- vpnAddr:
+ case hm.trigger <- vpnIp:
default:
}
}

hm.Unlock()
- hm.lightHouse.QueryServer(vpnAddr)
+ hm.lightHouse.QueryServer(vpnIp)
return hostinfo
}

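StartHandshake creates at most one pending record per address and funnels later callers onto it through the cache callback. A reduced sketch of that pattern with stand-in types; the names and fields are invented for the example.

package main

import (
	"fmt"
	"net/netip"
	"sync"
)

type pendingHost struct {
	addr    netip.Addr
	packets [][]byte // packets cached until the tunnel is ready
}

type pendingHandshakes struct {
	mu      sync.Mutex
	pending map[netip.Addr]*pendingHost
}

// start returns the existing pending record for addr, or creates one, and in
// both cases hands it to the caller's callback so a packet can be queued.
func (p *pendingHandshakes) start(addr netip.Addr, cache func(*pendingHost)) *pendingHost {
	p.mu.Lock()
	defer p.mu.Unlock()
	if h, ok := p.pending[addr]; ok {
		if cache != nil {
			cache(h) // already handshaking, just queue the caller's packet
		}
		return h
	}
	h := &pendingHost{addr: addr}
	p.pending[addr] = h
	if cache != nil {
		cache(h)
	}
	return h
}

func main() {
	p := &pendingHandshakes{pending: map[netip.Addr]*pendingHost{}}
	a := netip.MustParseAddr("10.1.1.2")
	h1 := p.start(a, func(h *pendingHost) { h.packets = append(h.packets, []byte("first")) })
	h2 := p.start(a, func(h *pendingHost) { h.packets = append(h.packets, []byte("second")) })
	fmt.Println(h1 == h2, len(h1.packets)) // true 2
}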
@@ -505,14 +458,14 @@ var (
//
// ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId.
- func (hm *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
- hm.mainHostMap.Lock()
- defer hm.mainHostMap.Unlock()
- hm.Lock()
- defer hm.Unlock()
+ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
+ c.mainHostMap.Lock()
+ defer c.mainHostMap.Unlock()
+ c.Lock()
+ defer c.Unlock()

// Check if we already have a tunnel with this vpn ip
- existingHostInfo, found := hm.mainHostMap.Hosts[hostinfo.vpnAddrs[0]]
+ existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
if found && existingHostInfo != nil {
testHostInfo := existingHostInfo
for testHostInfo != nil {
@@ -529,31 +482,31 @@ func (hm *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
return existingHostInfo, ErrExistingHostInfo
}

- existingHostInfo.logger(hm.l).Info("Taking new handshake")
+ existingHostInfo.logger(c.l).Info("Taking new handshake")
}

- existingIndex, found := hm.mainHostMap.Indexes[hostinfo.localIndexId]
+ existingIndex, found := c.mainHostMap.Indexes[hostinfo.localIndexId]
if found {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}

- existingPendingIndex, found := hm.indexes[hostinfo.localIndexId]
+ existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
if found && existingPendingIndex.hostinfo != hostinfo {
// We have a collision, but for a different hostinfo
return existingPendingIndex.hostinfo, ErrLocalIndexCollision
}

- existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
- if found && existingRemoteIndex != nil && existingRemoteIndex.vpnAddrs[0] != hostinfo.vpnAddrs[0] {
+ existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
+ if found && existingRemoteIndex != nil && existingRemoteIndex.vpnIp != hostinfo.vpnIp {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
- hostinfo.logger(hm.l).
- WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnAddrs).
+ hostinfo.logger(c.l).
+ WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}

- hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
+ c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
return existingHostInfo, nil
}

@@ -571,7 +524,7 @@ func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(hm.l).
- WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnAddrs).
+ WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}

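CheckAndComplete reports collisions through sentinel errors (ErrExistingHostInfo, ErrLocalIndexCollision) that the stage-1 code switches on. A reduced sketch of that contract with stand-in types and error values; it is not Nebula's actual host map.

package main

import (
	"errors"
	"fmt"
)

var (
	errExistingHostInfo    = errors.New("existing hostinfo")
	errLocalIndexCollision = errors.New("local index collision")
)

type hostInfo struct{ localIndex uint32 }

type hostMap struct {
	hosts   map[string]*hostInfo
	indexes map[uint32]*hostInfo
}

// checkAndComplete inserts the host only if neither its key nor its local
// index collides, otherwise it returns the colliding entry and a sentinel.
func (m *hostMap) checkAndComplete(h *hostInfo, key string) (*hostInfo, error) {
	if existing, ok := m.hosts[key]; ok {
		return existing, errExistingHostInfo
	}
	if existing, ok := m.indexes[h.localIndex]; ok {
		return existing, errLocalIndexCollision
	}
	m.hosts[key] = h
	m.indexes[h.localIndex] = h
	return nil, nil
}

func main() {
	m := &hostMap{hosts: map[string]*hostInfo{}, indexes: map[uint32]*hostInfo{}}
	if _, err := m.checkAndComplete(&hostInfo{localIndex: 7}, "10.1.1.2"); err != nil {
		fmt.Println("unexpected:", err)
	}
	_, err := m.checkAndComplete(&hostInfo{localIndex: 7}, "10.1.1.3")
	switch {
	case errors.Is(err, errLocalIndexCollision):
		fmt.Println("collision on localIndexId, let the next handshake retry")
	case errors.Is(err, errExistingHostInfo):
		fmt.Println("tunnel already exists")
	}
}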
@@ -608,34 +561,31 @@ func (hm *HandshakeManager) allocateIndex(hh *HandshakeHostInfo) error {
return errors.New("failed to generate unique localIndexId")
}

- func (hm *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
- hm.Lock()
- defer hm.Unlock()
- hm.unlockedDeleteHostInfo(hostinfo)
+ func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
+ c.Lock()
+ defer c.Unlock()
+ c.unlockedDeleteHostInfo(hostinfo)
}

- func (hm *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
- for _, addr := range hostinfo.vpnAddrs {
- delete(hm.vpnIps, addr)
+ func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
+ delete(c.vpnIps, hostinfo.vpnIp)
+ if len(c.vpnIps) == 0 {
+ c.vpnIps = map[netip.Addr]*HandshakeHostInfo{}
}

- if len(hm.vpnIps) == 0 {
- hm.vpnIps = map[netip.Addr]*HandshakeHostInfo{}
+ delete(c.indexes, hostinfo.localIndexId)
+ if len(c.vpnIps) == 0 {
+ c.indexes = map[uint32]*HandshakeHostInfo{}
}

- delete(hm.indexes, hostinfo.localIndexId)
- if len(hm.indexes) == 0 {
- hm.indexes = map[uint32]*HandshakeHostInfo{}
- }
-
- if hm.l.Level >= logrus.DebugLevel {
- hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.vpnIps),
- "vpnAddrs": hostinfo.vpnAddrs, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
+ if c.l.Level >= logrus.DebugLevel {
+ c.l.WithField("hostMap", m{"mapTotalSize": len(c.vpnIps),
+ "vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Pending hostmap hostInfo deleted")
}
}

- func (hm *HandshakeManager) QueryVpnAddr(vpnIp netip.Addr) *HostInfo {
+ func (hm *HandshakeManager) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
hh := hm.queryVpnIp(vpnIp)
if hh != nil {
return hh.hostinfo
@@ -664,37 +614,37 @@ func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
return hm.indexes[index]
}

- func (hm *HandshakeManager) GetPreferredRanges() []netip.Prefix {
- return hm.mainHostMap.GetPreferredRanges()
+ func (c *HandshakeManager) GetPreferredRanges() []netip.Prefix {
+ return c.mainHostMap.GetPreferredRanges()
}

- func (hm *HandshakeManager) ForEachVpnAddr(f controlEach) {
- hm.RLock()
- defer hm.RUnlock()
+ func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
+ c.RLock()
+ defer c.RUnlock()

- for _, v := range hm.vpnIps {
+ for _, v := range c.vpnIps {
f(v.hostinfo)
}
}

- func (hm *HandshakeManager) ForEachIndex(f controlEach) {
- hm.RLock()
- defer hm.RUnlock()
+ func (c *HandshakeManager) ForEachIndex(f controlEach) {
+ c.RLock()
+ defer c.RUnlock()

- for _, v := range hm.indexes {
+ for _, v := range c.indexes {
f(v.hostinfo)
}
}

- func (hm *HandshakeManager) EmitStats() {
- hm.RLock()
- hostLen := len(hm.vpnIps)
- indexLen := len(hm.indexes)
- hm.RUnlock()
+ func (c *HandshakeManager) EmitStats() {
+ c.RLock()
+ hostLen := len(c.vpnIps)
+ indexLen := len(c.indexes)
+ c.RUnlock()

metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(int64(indexLen))
- hm.mainHostMap.EmitStats()
+ c.mainHostMap.EmitStats()
}

// Utility functions below
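unlockedDeleteHostInfo swaps in a fresh map once the pending set drains. Deleting keys from a Go map does not shrink its internal buckets, so replacing an emptied map is a cheap way to release that memory after a burst of pending handshakes; whether that is the motivation here is an assumption. A stand-alone sketch of the idiom with string keys:

package main

import "fmt"

// deleteAndMaybeReset removes a key and, if the map is now empty, returns a
// brand-new map so the old (possibly large) bucket array can be collected.
func deleteAndMaybeReset(pending map[string]int, key string) map[string]int {
	delete(pending, key)
	if len(pending) == 0 {
		return map[string]int{}
	}
	return pending
}

func main() {
	pending := map[string]int{"10.1.1.2": 1, "10.1.1.3": 2}
	pending = deleteAndMaybeReset(pending, "10.1.1.2")
	pending = deleteAndMaybeReset(pending, "10.1.1.3")
	fmt.Println(len(pending)) // 0, backed by a fresh map
}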
@@ -14,20 +14,21 @@ import (

func Test_NewHandshakeManagerVpnIp(t *testing.T) {
l := test.NewLogger()
+ vpncidr := netip.MustParsePrefix("172.1.1.1/24")
localrange := netip.MustParsePrefix("10.1.1.1/24")
ip := netip.MustParseAddr("172.1.1.2")

preferredRanges := []netip.Prefix{localrange}
- mainHM := newHostMap(l)
+ mainHM := newHostMap(l, vpncidr)
mainHM.preferredRanges.Store(&preferredRanges)

lh := newTestLighthouse()

cs := &CertState{
- initiatingVersion: cert.Version1,
- privateKey: []byte{},
- v1Cert: &dummyCert{version: cert.Version1},
- v1HandshakeBytes: []byte{},
+ RawCertificate: []byte{},
+ PrivateKey: []byte{},
+ Certificate: &cert.NebulaCertificate{},
+ RawCertificateNoKey: []byte{},
}

blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
@@ -41,10 +42,10 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
i2 := blah.StartHandshake(ip, nil)
assert.Same(t, i, i2)

- i.remotes = NewRemoteList([]netip.Addr{}, nil)
+ i.remotes = NewRemoteList(nil)

// Adding something to pending should not affect the main hostmap
- assert.Empty(t, mainHM.Hosts)
+ assert.Len(t, mainHM.Hosts, 0)

// Confirm they are in the pending index list
assert.Contains(t, blah.vpnIps, ip)
@@ -79,24 +80,16 @@ func testCountTimerWheelEntries(tw *LockingTimerWheel[netip.Addr]) (c int) {
type mockEncWriter struct {
}

- func (mw *mockEncWriter) SendMessageToVpnAddr(_ header.MessageType, _ header.MessageSubType, _ netip.Addr, _, _, _ []byte) {
+ func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte) {
return
}

- func (mw *mockEncWriter) SendVia(_ *HostInfo, _ *Relay, _, _, _ []byte, _ bool) {
+ func (mw *mockEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
return
}

- func (mw *mockEncWriter) SendMessageToHostInfo(_ header.MessageType, _ header.MessageSubType, _ *HostInfo, _, _, _ []byte) {
+ func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
return
}

- func (mw *mockEncWriter) Handshake(_ netip.Addr) {}
+ func (mw *mockEncWriter) Handshake(vpnIP netip.Addr) {}

- func (mw *mockEncWriter) GetHostInfo(_ netip.Addr) *HostInfo {
- return nil
- }
-
- func (mw *mockEncWriter) GetCertState() *CertState {
- return &CertState{initiatingVersion: cert.Version2}
- }
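The test changes above move between assert.Len/assert.Nil and assert.Empty/require.NoError. A small stand-alone test in that style; the map and its values are invented for the example. require stops the test on a hard failure, while assert records the failure and keeps going.

package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestPendingMapExample(t *testing.T) {
	pending := map[string]int{}
	assert.Empty(t, pending) // reads more directly than assert.Len(t, pending, 0)

	var err error
	require.NoError(t, err) // a hard precondition: bail out if it fails

	pending["10.1.1.2"] = 1
	assert.Contains(t, pending, "10.1.1.2")
	assert.Len(t, pending, 1)
}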
@@ -19,7 +19,7 @@ import (
// |-----------------------------------------------------------------------|
// | payload... |

- type m = map[string]any
+ type m map[string]interface{}

const (
Version uint8 = 1
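The header.go change swaps a defined map type for a type alias. An alias (declared with "=") is the same type as map[string]any, while a defined type is a distinct named type: it can carry its own methods and reports its own name under reflection. A small illustration; the names here are made up.

package main

import (
	"fmt"
	"reflect"
)

type aliasM = map[string]any   // just another name for map[string]any
type definedM map[string]any   // a new, distinct type

// Methods can only be declared on the defined type, not on the alias.
func (d definedM) Stage() any { return d["stage"] }

func main() {
	a := aliasM{"stage": 1}
	d := definedM{"stage": 2}
	fmt.Println(reflect.TypeOf(a)) // map[string]interface {}
	fmt.Println(reflect.TypeOf(d)) // main.definedM
	fmt.Println(d.Stage())
}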
@@ -5,7 +5,6 @@ import (
"testing"

"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)

type headerTest struct {
@@ -112,7 +111,7 @@ func TestHeader_String(t *testing.T) {

func TestHeader_MarshalJSON(t *testing.T) {
b, err := (&H{100, Test, TestRequest, 99, 98, 97}).MarshalJSON()
- require.NoError(t, err)
+ assert.Nil(t, err)
assert.Equal(
t,
"{\"messageCounter\":97,\"remoteIndex\":98,\"reserved\":99,\"subType\":\"testRequest\",\"type\":\"test\",\"version\":100}",
238 hostmap.go
@@ -50,7 +50,7 @@ type Relay struct {
State int
LocalIndex uint32
RemoteIndex uint32
- PeerAddr netip.Addr
+ PeerIp netip.Addr
}

type HostMap struct {
@@ -60,6 +60,7 @@ type HostMap struct {
RemoteIndexes map[uint32]*HostInfo
Hosts map[netip.Addr]*HostInfo
preferredRanges atomic.Pointer[[]netip.Prefix]
+ vpnCIDR netip.Prefix
l *logrus.Logger
}

@@ -69,12 +70,9 @@ type HostMap struct {
type RelayState struct {
sync.RWMutex

- relays []netip.Addr // Ordered set of VpnAddrs of Hosts to use as relays to access this peer
- // For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data,
- // modify what needs to be updated, and store the new modified copy in the relayForByIp and relayForByIdx maps (with
- // the RelayState Lock held)
- relayForByAddr map[netip.Addr]*Relay // Maps vpnAddr of peers for which this HostInfo is a relay to some Relay info
- relayForByIdx map[uint32]*Relay // Maps a local index to some Relay info
+ relays []netip.Addr // Ordered set of VpnIp's of Hosts to use as relays to access this peer
+ relayForByIp map[netip.Addr]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
+ relayForByIdx map[uint32]*Relay // Maps a local index to some Relay info
}

func (rs *RelayState) DeleteRelay(ip netip.Addr) {
@@ -91,10 +89,10 @@ func (rs *RelayState) DeleteRelay(ip netip.Addr) {
func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
rs.Lock()
defer rs.Unlock()
- if r, ok := rs.relayForByAddr[vpnIp]; ok {
+ if r, ok := rs.relayForByIp[vpnIp]; ok {
newRelay := *r
newRelay.State = state
- rs.relayForByAddr[newRelay.PeerAddr] = &newRelay
+ rs.relayForByIp[newRelay.PeerIp] = &newRelay
rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
}
}
@@ -105,7 +103,7 @@ func (rs *RelayState) UpdateRelayForByIdxState(idx uint32, state int) {
if r, ok := rs.relayForByIdx[idx]; ok {
newRelay := *r
newRelay.State = state
- rs.relayForByAddr[newRelay.PeerAddr] = &newRelay
+ rs.relayForByIp[newRelay.PeerIp] = &newRelay
rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
}
}
@@ -120,10 +118,10 @@ func (rs *RelayState) CopyAllRelayFor() []*Relay {
return ret
}

- func (rs *RelayState) GetRelayForByAddr(addr netip.Addr) (*Relay, bool) {
+ func (rs *RelayState) GetRelayForByIp(ip netip.Addr) (*Relay, bool) {
rs.RLock()
defer rs.RUnlock()
- r, ok := rs.relayForByAddr[addr]
+ r, ok := rs.relayForByIp[ip]
return r, ok
}

@@ -146,8 +144,8 @@ func (rs *RelayState) CopyRelayIps() []netip.Addr {
func (rs *RelayState) CopyRelayForIps() []netip.Addr {
rs.RLock()
defer rs.RUnlock()
- currentRelays := make([]netip.Addr, 0, len(rs.relayForByAddr))
- for relayIp := range rs.relayForByAddr {
+ currentRelays := make([]netip.Addr, 0, len(rs.relayForByIp))
+ for relayIp := range rs.relayForByIp {
currentRelays = append(currentRelays, relayIp)
}
return currentRelays
@@ -166,7 +164,7 @@ func (rs *RelayState) CopyRelayForIdxs() []uint32 {
func (rs *RelayState) CompleteRelayByIP(vpnIp netip.Addr, remoteIdx uint32) bool {
rs.Lock()
defer rs.Unlock()
- r, ok := rs.relayForByAddr[vpnIp]
+ r, ok := rs.relayForByIp[vpnIp]
if !ok {
return false
}
@@ -174,7 +172,7 @@ func (rs *RelayState) CompleteRelayByIP(vpnIp netip.Addr, remoteIdx uint32) bool
newRelay.State = Established
newRelay.RemoteIndex = remoteIdx
rs.relayForByIdx[r.LocalIndex] = &newRelay
- rs.relayForByAddr[r.PeerAddr] = &newRelay
+ rs.relayForByIp[r.PeerIp] = &newRelay
return true
}

@@ -189,14 +187,14 @@ func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Re
newRelay.State = Established
newRelay.RemoteIndex = remoteIdx
rs.relayForByIdx[r.LocalIndex] = &newRelay
- rs.relayForByAddr[r.PeerAddr] = &newRelay
+ rs.relayForByIp[r.PeerIp] = &newRelay
return &newRelay, true
}

func (rs *RelayState) QueryRelayForByIp(vpnIp netip.Addr) (*Relay, bool) {
rs.RLock()
defer rs.RUnlock()
- r, ok := rs.relayForByAddr[vpnIp]
+ r, ok := rs.relayForByIp[vpnIp]
return r, ok
}

@@ -210,7 +208,7 @@ func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
rs.Lock()
defer rs.Unlock()
- rs.relayForByAddr[ip] = r
+ rs.relayForByIp[ip] = r
rs.relayForByIdx[idx] = r
}

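The removed RelayState comment describes treating *Relay values as immutable: copy the existing data, modify the copy, and publish the new pointer in the maps while holding the lock, so readers holding an old pointer never observe a half-written update. A reduced sketch of that pattern with one map and stand-in fields.

package main

import (
	"fmt"
	"sync"
)

type relay struct {
	state      int
	localIndex uint32
}

type relayState struct {
	sync.RWMutex
	byIndex map[uint32]*relay
}

// updateState never mutates a published *relay in place; it stores a modified copy.
func (rs *relayState) updateState(idx uint32, state int) {
	rs.Lock()
	defer rs.Unlock()
	if r, ok := rs.byIndex[idx]; ok {
		newRelay := *r              // copy the existing data
		newRelay.state = state      // modify the copy
		rs.byIndex[idx] = &newRelay // publish the new pointer under the lock
	}
}

func (rs *relayState) get(idx uint32) *relay {
	rs.RLock()
	defer rs.RUnlock()
	return rs.byIndex[idx]
}

func main() {
	rs := &relayState{byIndex: map[uint32]*relay{7: {state: 0, localIndex: 7}}}
	old := rs.get(7)
	rs.updateState(7, 2)
	fmt.Println(old.state, rs.get(7).state) // 0 2: the old snapshot is untouched
}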
@@ -221,16 +219,10 @@ type HostInfo struct {
ConnectionState *ConnectionState
remoteIndexId uint32
localIndexId uint32
- // vpnAddrs is a list of vpn addresses assigned to this host that are within our own vpn networks
- // The host may have other vpn addresses that are outside our
- // vpn networks but were removed because they are not usable
- vpnAddrs []netip.Addr
- recvError atomic.Uint32
-
- // networks are both all vpn and unsafe networks assigned to this host
- networks *bart.Lite
- relayState RelayState
-
+ vpnIp netip.Addr
+ recvError atomic.Uint32
+ remoteCidr *bart.Table[struct{}]
+ relayState RelayState

// HandshakePacket records the packets used to create this hostinfo
// We need these to avoid replayed handshake packets creating new hostinfos which causes churn
@@ -286,26 +278,28 @@ type cachedPacketMetrics struct {
dropped metrics.Counter
}

- func NewHostMapFromConfig(l *logrus.Logger, c *config.C) *HostMap {
- hm := newHostMap(l)
+ func NewHostMapFromConfig(l *logrus.Logger, vpnCIDR netip.Prefix, c *config.C) *HostMap {
+ hm := newHostMap(l, vpnCIDR)

hm.reload(c, true)
c.RegisterReloadCallback(func(c *config.C) {
hm.reload(c, false)
})

- l.WithField("preferredRanges", hm.GetPreferredRanges()).
+ l.WithField("network", hm.vpnCIDR.String()).
+ WithField("preferredRanges", hm.GetPreferredRanges()).
Info("Main HostMap created")

return hm
}

- func newHostMap(l *logrus.Logger) *HostMap {
+ func newHostMap(l *logrus.Logger, vpnCIDR netip.Prefix) *HostMap {
return &HostMap{
Indexes: map[uint32]*HostInfo{},
Relays: map[uint32]*HostInfo{},
RemoteIndexes: map[uint32]*HostInfo{},
Hosts: map[netip.Addr]*HostInfo{},
+ vpnCIDR: vpnCIDR,
l: l,
}
}
@@ -348,6 +342,17 @@ func (hm *HostMap) EmitStats() {
metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
}

+ func (hm *HostMap) RemoveRelay(localIdx uint32) {
+ hm.Lock()
+ _, ok := hm.Relays[localIdx]
+ if !ok {
+ hm.Unlock()
+ return
+ }
+ delete(hm.Relays, localIdx)
+ hm.Unlock()
+ }
+
// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
// Delete the host itself, ensuring it's not modified anymore
@@ -367,73 +372,49 @@ func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
}

func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
- // Get the current primary, if it exists
- oldHostinfo := hm.Hosts[hostinfo.vpnAddrs[0]]
-
- // Every address in the hostinfo gets elevated to primary
- for _, vpnAddr := range hostinfo.vpnAddrs {
- //NOTE: It is possible that we leave a dangling hostinfo here but connection manager works on
- // indexes so it should be fine.
- hm.Hosts[vpnAddr] = hostinfo
+ oldHostinfo := hm.Hosts[hostinfo.vpnIp]
}
|
|
||||||
|
|
||||||
// If we are already primary then we won't bother re-linking
|
|
||||||
if oldHostinfo == hostinfo {
|
if oldHostinfo == hostinfo {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unlink this hostinfo
|
|
||||||
if hostinfo.prev != nil {
|
if hostinfo.prev != nil {
|
||||||
hostinfo.prev.next = hostinfo.next
|
hostinfo.prev.next = hostinfo.next
|
||||||
}
|
}
|
||||||
|
|
||||||
if hostinfo.next != nil {
|
if hostinfo.next != nil {
|
||||||
hostinfo.next.prev = hostinfo.prev
|
hostinfo.next.prev = hostinfo.prev
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there wasn't a previous primary then clear out any links
|
hm.Hosts[hostinfo.vpnIp] = hostinfo
|
||||||
|
|
||||||
if oldHostinfo == nil {
|
if oldHostinfo == nil {
|
||||||
hostinfo.next = nil
|
|
||||||
hostinfo.prev = nil
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Relink the hostinfo as primary
|
|
||||||
hostinfo.next = oldHostinfo
|
hostinfo.next = oldHostinfo
|
||||||
oldHostinfo.prev = hostinfo
|
oldHostinfo.prev = hostinfo
|
||||||
hostinfo.prev = nil
|
hostinfo.prev = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
|
||||||
for _, addr := range hostinfo.vpnAddrs {
|
primary, ok := hm.Hosts[hostinfo.vpnIp]
|
||||||
h := hm.Hosts[addr]
|
|
||||||
for h != nil {
|
|
||||||
if h == hostinfo {
|
|
||||||
hm.unlockedInnerDeleteHostInfo(h, addr)
|
|
||||||
}
|
|
||||||
h = h.next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hm *HostMap) unlockedInnerDeleteHostInfo(hostinfo *HostInfo, addr netip.Addr) {
|
|
||||||
primary, ok := hm.Hosts[addr]
|
|
||||||
isLastHostinfo := hostinfo.next == nil && hostinfo.prev == nil
|
isLastHostinfo := hostinfo.next == nil && hostinfo.prev == nil
|
||||||
if ok && primary == hostinfo {
|
if ok && primary == hostinfo {
|
||||||
// The vpn addr pointer points to the same hostinfo as the local index id, we can remove it
|
// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
|
||||||
delete(hm.Hosts, addr)
|
delete(hm.Hosts, hostinfo.vpnIp)
|
||||||
if len(hm.Hosts) == 0 {
|
if len(hm.Hosts) == 0 {
|
||||||
hm.Hosts = map[netip.Addr]*HostInfo{}
|
hm.Hosts = map[netip.Addr]*HostInfo{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if hostinfo.next != nil {
|
if hostinfo.next != nil {
|
||||||
// We had more than 1 hostinfo at this vpn addr, promote the next in the list to primary
|
// We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
|
||||||
hm.Hosts[addr] = hostinfo.next
|
hm.Hosts[hostinfo.vpnIp] = hostinfo.next
|
||||||
// It is primary, there is no previous hostinfo now
|
// It is primary, there is no previous hostinfo now
|
||||||
hostinfo.next.prev = nil
|
hostinfo.next.prev = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
// Relink if we were in the middle of multiple hostinfos for this vpn addr
|
// Relink if we were in the middle of multiple hostinfos for this vpn ip
|
||||||
if hostinfo.prev != nil {
|
if hostinfo.prev != nil {
|
||||||
hostinfo.prev.next = hostinfo.next
|
hostinfo.prev.next = hostinfo.next
|
||||||
}
|
}
|
||||||
@ -463,13 +444,13 @@ func (hm *HostMap) unlockedInnerDeleteHostInfo(hostinfo *HostInfo, addr netip.Ad
|
|||||||
|
|
||||||
if hm.l.Level >= logrus.DebugLevel {
|
if hm.l.Level >= logrus.DebugLevel {
|
||||||
hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
|
hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
|
||||||
"vpnAddrs": hostinfo.vpnAddrs, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
|
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
|
||||||
Debug("Hostmap hostInfo deleted")
|
Debug("Hostmap hostInfo deleted")
|
||||||
}
|
}
|
||||||
|
|
||||||
if isLastHostinfo {
|
if isLastHostinfo {
|
||||||
// I have lost connectivity to my peers. My relay tunnel is likely broken. Mark the next
|
// I have lost connectivity to my peers. My relay tunnel is likely broken. Mark the next
|
||||||
// hops as 'Requested' so that new relay tunnels are created in the future.
|
// hops as 'Disestablished' so that new relay tunnels are created in the future.
|
||||||
hm.unlockedDisestablishVpnAddrRelayFor(hostinfo)
|
hm.unlockedDisestablishVpnAddrRelayFor(hostinfo)
|
||||||
}
|
}
|
||||||
// Clean up any local relay indexes for which I am acting as a relay hop
|
// Clean up any local relay indexes for which I am acting as a relay hop
|
||||||
@ -511,11 +492,11 @@ func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) QueryVpnAddr(vpnIp netip.Addr) *HostInfo {
|
func (hm *HostMap) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
|
||||||
return hm.queryVpnAddr(vpnIp, nil)
|
return hm.queryVpnIp(vpnIp, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) QueryVpnAddrsRelayFor(targetIps []netip.Addr, relayHostIp netip.Addr) (*HostInfo, *Relay, error) {
|
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostInfo, *Relay, error) {
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
defer hm.RUnlock()
|
defer hm.RUnlock()
|
||||||
|
|
||||||
@ -523,17 +504,13 @@ func (hm *HostMap) QueryVpnAddrsRelayFor(targetIps []netip.Addr, relayHostIp net
|
|||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil, errors.New("unable to find host")
|
return nil, nil, errors.New("unable to find host")
|
||||||
}
|
}
|
||||||
|
|
||||||
for h != nil {
|
for h != nil {
|
||||||
for _, targetIp := range targetIps {
|
r, ok := h.relayState.QueryRelayForByIp(targetIp)
|
||||||
r, ok := h.relayState.QueryRelayForByIp(targetIp)
|
if ok && r.State == Established {
|
||||||
if ok && r.State == Established {
|
return h, r, nil
|
||||||
return h, r, nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
h = h.next
|
h = h.next
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, nil, errors.New("unable to find host with relay")
|
return nil, nil, errors.New("unable to find host with relay")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -541,16 +518,16 @@ func (hm *HostMap) unlockedDisestablishVpnAddrRelayFor(hi *HostInfo) {
|
|||||||
for _, relayHostIp := range hi.relayState.CopyRelayIps() {
|
for _, relayHostIp := range hi.relayState.CopyRelayIps() {
|
||||||
if h, ok := hm.Hosts[relayHostIp]; ok {
|
if h, ok := hm.Hosts[relayHostIp]; ok {
|
||||||
for h != nil {
|
for h != nil {
|
||||||
h.relayState.UpdateRelayForByIpState(hi.vpnAddrs[0], Disestablished)
|
h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
|
||||||
h = h.next
|
h = h.next
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, rs := range hi.relayState.CopyAllRelayFor() {
|
for _, rs := range hi.relayState.CopyAllRelayFor() {
|
||||||
if rs.Type == ForwardingType {
|
if rs.Type == ForwardingType {
|
||||||
if h, ok := hm.Hosts[rs.PeerAddr]; ok {
|
if h, ok := hm.Hosts[rs.PeerIp]; ok {
|
||||||
for h != nil {
|
for h != nil {
|
||||||
h.relayState.UpdateRelayForByIpState(hi.vpnAddrs[0], Disestablished)
|
h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
|
||||||
h = h.next
|
h = h.next
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -558,7 +535,7 @@ func (hm *HostMap) unlockedDisestablishVpnAddrRelayFor(hi *HostInfo) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) queryVpnAddr(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
|
func (hm *HostMap) queryVpnIp(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
if h, ok := hm.Hosts[vpnIp]; ok {
|
if h, ok := hm.Hosts[vpnIp]; ok {
|
||||||
hm.RUnlock()
|
hm.RUnlock()
|
||||||
@ -579,30 +556,25 @@ func (hm *HostMap) queryVpnAddr(vpnIp netip.Addr, promoteIfce *Interface) *HostI
|
|||||||
func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
|
func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
|
||||||
if f.serveDns {
|
if f.serveDns {
|
||||||
remoteCert := hostinfo.ConnectionState.peerCert
|
remoteCert := hostinfo.ConnectionState.peerCert
|
||||||
dnsR.Add(remoteCert.Certificate.Name()+".", hostinfo.vpnAddrs)
|
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
|
||||||
}
|
}
|
||||||
for _, addr := range hostinfo.vpnAddrs {
|
|
||||||
hm.unlockedInnerAddHostInfo(addr, hostinfo, f)
|
existing := hm.Hosts[hostinfo.vpnIp]
|
||||||
|
hm.Hosts[hostinfo.vpnIp] = hostinfo
|
||||||
|
|
||||||
|
if existing != nil {
|
||||||
|
hostinfo.next = existing
|
||||||
|
existing.prev = hostinfo
|
||||||
}
|
}
|
||||||
|
|
||||||
hm.Indexes[hostinfo.localIndexId] = hostinfo
|
hm.Indexes[hostinfo.localIndexId] = hostinfo
|
||||||
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
|
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
|
||||||
|
|
||||||
if hm.l.Level >= logrus.DebugLevel {
|
if hm.l.Level >= logrus.DebugLevel {
|
||||||
hm.l.WithField("hostMap", m{"vpnAddrs": hostinfo.vpnAddrs, "mapTotalSize": len(hm.Hosts),
|
hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
|
||||||
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "vpnAddrs": hostinfo.vpnAddrs}}).
|
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
|
||||||
Debug("Hostmap vpnIp added")
|
Debug("Hostmap vpnIp added")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func (hm *HostMap) unlockedInnerAddHostInfo(vpnAddr netip.Addr, hostinfo *HostInfo, f *Interface) {
|
|
||||||
existing := hm.Hosts[vpnAddr]
|
|
||||||
hm.Hosts[vpnAddr] = hostinfo
|
|
||||||
|
|
||||||
if existing != nil && existing != hostinfo {
|
|
||||||
hostinfo.next = existing
|
|
||||||
existing.prev = hostinfo
|
|
||||||
}
|
|
||||||
|
|
||||||
i := 1
|
i := 1
|
||||||
check := hostinfo
|
check := hostinfo
|
||||||
@ -620,7 +592,7 @@ func (hm *HostMap) GetPreferredRanges() []netip.Prefix {
|
|||||||
return *hm.preferredRanges.Load()
|
return *hm.preferredRanges.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hm *HostMap) ForEachVpnAddr(f controlEach) {
|
func (hm *HostMap) ForEachVpnIp(f controlEach) {
|
||||||
hm.RLock()
|
hm.RLock()
|
||||||
defer hm.RUnlock()
|
defer hm.RUnlock()
|
||||||
|
|
||||||
@ -674,11 +646,11 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interfac
|
|||||||
}
|
}
|
||||||
|
|
||||||
i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
|
i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
|
||||||
ifce.lightHouse.QueryServer(i.vpnAddrs[0])
|
ifce.lightHouse.QueryServer(i.vpnIp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *HostInfo) GetCert() *cert.CachedCertificate {
|
func (i *HostInfo) GetCert() *cert.NebulaCertificate {
|
||||||
if i.ConnectionState != nil {
|
if i.ConnectionState != nil {
|
||||||
return i.ConnectionState.peerCert
|
return i.ConnectionState.peerCert
|
||||||
}
|
}
|
||||||
@ -689,7 +661,7 @@ func (i *HostInfo) SetRemote(remote netip.AddrPort) {
|
|||||||
// We copy here because we likely got this remote from a source that reuses the object
|
// We copy here because we likely got this remote from a source that reuses the object
|
||||||
if i.remote != remote {
|
if i.remote != remote {
|
||||||
i.remote = remote
|
i.remote = remote
|
||||||
i.remotes.LearnRemote(i.vpnAddrs[0], remote)
|
i.remotes.LearnRemote(i.vpnIp, remote)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -740,20 +712,29 @@ func (i *HostInfo) RecvErrorExceeded() bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *HostInfo) buildNetworks(networks, unsafeNetworks []netip.Prefix) {
|
func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
|
||||||
if len(networks) == 1 && len(unsafeNetworks) == 0 {
|
if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
|
||||||
// Simple case, no CIDRTree needed
|
// Simple case, no CIDRTree needed
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
i.networks = new(bart.Lite)
|
remoteCidr := new(bart.Table[struct{}])
|
||||||
for _, network := range networks {
|
for _, ip := range c.Details.Ips {
|
||||||
i.networks.Insert(network)
|
//TODO: IPV6-WORK what to do when ip is invalid?
|
||||||
|
nip, _ := netip.AddrFromSlice(ip.IP)
|
||||||
|
nip = nip.Unmap()
|
||||||
|
bits, _ := ip.Mask.Size()
|
||||||
|
remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, network := range unsafeNetworks {
|
for _, n := range c.Details.Subnets {
|
||||||
i.networks.Insert(network)
|
//TODO: IPV6-WORK what to do when ip is invalid?
|
||||||
|
nip, _ := netip.AddrFromSlice(n.IP)
|
||||||
|
nip = nip.Unmap()
|
||||||
|
bits, _ := n.Mask.Size()
|
||||||
|
remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
|
||||||
}
|
}
|
||||||
|
i.remoteCidr = remoteCidr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
||||||
@ -761,13 +742,13 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
|||||||
return logrus.NewEntry(l)
|
return logrus.NewEntry(l)
|
||||||
}
|
}
|
||||||
|
|
||||||
li := l.WithField("vpnAddrs", i.vpnAddrs).
|
li := l.WithField("vpnIp", i.vpnIp).
|
||||||
WithField("localIndex", i.localIndexId).
|
WithField("localIndex", i.localIndexId).
|
||||||
WithField("remoteIndex", i.remoteIndexId)
|
WithField("remoteIndex", i.remoteIndexId)
|
||||||
|
|
||||||
if connState := i.ConnectionState; connState != nil {
|
if connState := i.ConnectionState; connState != nil {
|
||||||
if peerCert := connState.peerCert; peerCert != nil {
|
if peerCert := connState.peerCert; peerCert != nil {
|
||||||
li = li.WithField("certName", peerCert.Certificate.Name())
|
li = li.WithField("certName", peerCert.Details.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -776,9 +757,9 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
|||||||
|
|
||||||
// Utility functions
|
// Utility functions
|
||||||
|
|
||||||
func localAddrs(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
|
func localIps(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
|
||||||
//FIXME: This function is pretty garbage
|
//FIXME: This function is pretty garbage
|
||||||
var finalAddrs []netip.Addr
|
var ips []netip.Addr
|
||||||
ifaces, _ := net.Interfaces()
|
ifaces, _ := net.Interfaces()
|
||||||
for _, i := range ifaces {
|
for _, i := range ifaces {
|
||||||
allow := allowList.AllowName(i.Name)
|
allow := allowList.AllowName(i.Name)
|
||||||
@ -790,36 +771,39 @@ func localAddrs(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
addrs, _ := i.Addrs()
|
addrs, _ := i.Addrs()
|
||||||
for _, rawAddr := range addrs {
|
for _, addr := range addrs {
|
||||||
var addr netip.Addr
|
var ip net.IP
|
||||||
switch v := rawAddr.(type) {
|
switch v := addr.(type) {
|
||||||
case *net.IPNet:
|
case *net.IPNet:
|
||||||
//continue
|
//continue
|
||||||
addr, _ = netip.AddrFromSlice(v.IP)
|
ip = v.IP
|
||||||
case *net.IPAddr:
|
case *net.IPAddr:
|
||||||
addr, _ = netip.AddrFromSlice(v.IP)
|
ip = v.IP
|
||||||
}
|
}
|
||||||
|
|
||||||
if !addr.IsValid() {
|
nip, ok := netip.AddrFromSlice(ip)
|
||||||
|
if !ok {
|
||||||
if l.Level >= logrus.DebugLevel {
|
if l.Level >= logrus.DebugLevel {
|
||||||
l.WithField("localAddr", rawAddr).Debug("addr was invalid")
|
l.WithField("localIp", ip).Debug("ip was invalid for netip")
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
addr = addr.Unmap()
|
nip = nip.Unmap()
|
||||||
|
|
||||||
if addr.IsLoopback() == false && addr.IsLinkLocalUnicast() == false {
|
//TODO: Filtering out link local for now, this is probably the most correct thing
|
||||||
isAllowed := allowList.Allow(addr)
|
//TODO: Would be nice to filter out SLAAC MAC based ips as well
|
||||||
|
if nip.IsLoopback() == false && nip.IsLinkLocalUnicast() == false {
|
||||||
|
allow := allowList.Allow(nip)
|
||||||
if l.Level >= logrus.TraceLevel {
|
if l.Level >= logrus.TraceLevel {
|
||||||
l.WithField("localAddr", addr).WithField("allowed", isAllowed).Trace("localAllowList.Allow")
|
l.WithField("localIp", nip).WithField("allow", allow).Trace("localAllowList.Allow")
|
||||||
}
|
}
|
||||||
if !isAllowed {
|
if !allow {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
finalAddrs = append(finalAddrs, addr)
|
ips = append(ips, nip)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return finalAddrs
|
return ips
|
||||||
}
|
}
|
||||||
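
The hostmap changes above revolve around a small doubly-linked list of HostInfo entries kept per vpn address, with the map pointing at the current primary. The standalone Go sketch below illustrates only that promotion step; the node type, map, and makePrimary helper are invented for the example and are not Nebula's real HostInfo or HostMap API.

// Minimal sketch of the "primary hostinfo" promotion performed by unlockedMakePrimary.
package main

import (
	"fmt"
	"net/netip"
)

type node struct {
	id         int
	prev, next *node
}

// makePrimary moves n to the front of the list stored in hosts[addr],
// mirroring the unlink/relink steps in the diff above.
func makePrimary(hosts map[netip.Addr]*node, addr netip.Addr, n *node) {
	old := hosts[addr]
	hosts[addr] = n
	if old == n {
		return
	}
	// Unlink n from wherever it currently sits.
	if n.prev != nil {
		n.prev.next = n.next
	}
	if n.next != nil {
		n.next.prev = n.prev
	}
	if old == nil {
		n.prev, n.next = nil, nil
		return
	}
	// Relink n as the new head of the list.
	n.next = old
	old.prev = n
	n.prev = nil
}

func main() {
	addr := netip.MustParseAddr("10.0.0.1")
	a, b := &node{id: 1}, &node{id: 2}
	hosts := map[netip.Addr]*node{}
	// Build the list a -> b with a as primary, then promote b.
	hosts[addr] = b
	a.next, b.prev = b, a
	hosts[addr] = a
	makePrimary(hosts, addr, b)
	for h := hosts[addr]; h != nil; h = h.next {
		fmt.Println(h.id) // prints 2 then 1
	}
}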

@ -12,14 +12,17 @@ import (

func TestHostMap_MakePrimary(t *testing.T) {
l := test.NewLogger()
-hm := newHostMap(l)
+hm := newHostMap(
+l,
+netip.MustParsePrefix("10.0.0.1/24"),
+)

f := &Interface{}

-h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
-h2 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 2}
-h3 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 3}
-h4 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 4}
+h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
+h2 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 2}
+h3 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 3}
+h4 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 4}

hm.unlockedAddHostInfo(h4, f)
hm.unlockedAddHostInfo(h3, f)
@ -27,7 +30,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.unlockedAddHostInfo(h1, f)

// Make sure we go h1 -> h2 -> h3 -> h4
-prim := hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim := hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -42,7 +45,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h3)

// Make sure we go h3 -> h1 -> h2 -> h4
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h3.localIndexId, prim.localIndexId)
assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -57,7 +60,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h4)

// Make sure we go h4 -> h3 -> h1 -> h2
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -72,7 +75,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h4)

// Make sure we go h4 -> h3 -> h1 -> h2
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -86,16 +89,19 @@ func TestHostMap_MakePrimary(t *testing.T) {

func TestHostMap_DeleteHostInfo(t *testing.T) {
l := test.NewLogger()
-hm := newHostMap(l)
+hm := newHostMap(
+l,
+netip.MustParsePrefix("10.0.0.1/24"),
+)

f := &Interface{}

-h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
-h2 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 2}
-h3 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 3}
-h4 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 4}
-h5 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 5}
-h6 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 6}
+h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
+h2 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 2}
+h3 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 3}
+h4 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 4}
+h5 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 5}
+h6 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 6}

hm.unlockedAddHostInfo(h6, f)
hm.unlockedAddHostInfo(h5, f)
@ -111,7 +117,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h)

// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
-prim := hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim := hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -130,7 +136,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h1.next)

// Make sure we go h2 -> h3 -> h4 -> h5
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -148,7 +154,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h3.next)

// Make sure we go h2 -> h4 -> h5
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -164,7 +170,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h5.next)

// Make sure we go h2 -> h4
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@ -178,7 +184,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h2.next)

// Make sure we only have h4
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Nil(t, prim.prev)
assert.Nil(t, prim.next)
@ -190,7 +196,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h4.next)

// Make sure we have nil
-prim = hm.QueryVpnAddr(netip.MustParseAddr("0.0.0.1"))
+prim = hm.QueryVpnIp(netip.MustParseAddr("0.0.0.1"))
assert.Nil(t, prim)
}

@ -198,7 +204,11 @@ func TestHostMap_reload(t *testing.T) {
l := test.NewLogger()
c := config.NewC(l)

-hm := NewHostMapFromConfig(l, c)
+hm := NewHostMapFromConfig(
+l,
+netip.MustParsePrefix("10.0.0.1/24"),
+c,
+)

toS := func(ipn []netip.Prefix) []string {
var s []string
@ -211,36 +221,36 @@ func TestHostMap_reload(t *testing.T) {
assert.Empty(t, hm.GetPreferredRanges())

c.ReloadConfigString("preferred_ranges: [1.1.1.0/24, 10.1.1.0/24]")
-assert.Equal(t, []string{"1.1.1.0/24", "10.1.1.0/24"}, toS(hm.GetPreferredRanges()))
+assert.EqualValues(t, []string{"1.1.1.0/24", "10.1.1.0/24"}, toS(hm.GetPreferredRanges()))

c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
-assert.Equal(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
+assert.EqualValues(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
}

func TestHostMap_RelayState(t *testing.T) {
-h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
+h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
a1 := netip.MustParseAddr("::1")
a2 := netip.MustParseAddr("2001::1")

h1.relayState.InsertRelayTo(a1)
-assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
+assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
h1.relayState.InsertRelayTo(a2)
-assert.Equal(t, []netip.Addr{a1, a2}, h1.relayState.relays)
+assert.Equal(t, h1.relayState.relays, []netip.Addr{a1, a2})
// Ensure that the first relay added is the first one returned in the copy
currentRelays := h1.relayState.CopyRelayIps()
require.Len(t, currentRelays, 2)
-assert.Equal(t, a1, currentRelays[0])
+assert.Equal(t, currentRelays[0], a1)

// Deleting the last one in the list works ok
h1.relayState.DeleteRelay(a2)
-assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
+assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})

// Deleting an element not in the list works ok
h1.relayState.DeleteRelay(a2)
-assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
+assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})

// Deleting the only element in the list works ok
h1.relayState.DeleteRelay(a1)
-assert.Equal(t, []netip.Addr{}, h1.relayState.relays)
+assert.Equal(t, h1.relayState.relays, []netip.Addr{})

}

@ -9,8 +9,8 @@ import (
"net/netip"
)

-func (i *HostInfo) GetVpnAddrs() []netip.Addr {
-return i.vpnAddrs
+func (i *HostInfo) GetVpnIp() netip.Addr {
+return i.vpnIp
}

func (i *HostInfo) GetLocalIndex() uint32 {
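
Both CreateRemoteCIDR in the hostmap diff above and the certificate handling in NewInterface further down convert a legacy net.IPNet from the certificate into net/netip types before using it. A minimal standalone sketch of that conversion, using only the standard library and a made-up example subnet:

// Sketch: net.IPNet -> netip.Prefix, as done for certificate Ips/Subnets.
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func toPrefix(n *net.IPNet) (netip.Prefix, bool) {
	addr, ok := netip.AddrFromSlice(n.IP)
	if !ok {
		return netip.Prefix{}, false
	}
	// Unmap 4-in-6 addresses so 10.1.0.0 and ::ffff:10.1.0.0 compare equal.
	addr = addr.Unmap()
	bits, _ := n.Mask.Size()
	return netip.PrefixFrom(addr, bits), true
}

func main() {
	_, ipNet, err := net.ParseCIDR("10.1.0.0/16")
	if err != nil {
		panic(err)
	}
	if p, ok := toPrefix(ipNet); ok {
		fmt.Println(p) // 10.1.0.0/16
	}
}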

132 inside.go
@ -8,7 +8,6 @@ import (
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/noiseutil"
-"github.com/slackhq/nebula/routing"
)

func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
@ -21,16 +20,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
}

// Ignore local broadcast packets
-if f.dropLocalBroadcast {
-if f.myBroadcastAddrsTable.Contains(fwPacket.RemoteAddr) {
-return
-}
+if f.dropLocalBroadcast && fwPacket.RemoteIP == f.myBroadcastAddr {
+return
}

-if f.myVpnAddrsTable.Contains(fwPacket.RemoteAddr) {
+if fwPacket.RemoteIP == f.myVpnNet.Addr() {
// Immediately forward packets from self to self.
// This should only happen on Darwin-based and FreeBSD hosts, which
-// routes packets from the Nebula addr to the Nebula addr through the Nebula
+// routes packets from the Nebula IP to the Nebula IP through the Nebula
// TUN device.
if immediatelyForwardToSelf {
_, err := f.readers[q].Write(packet)
@ -39,25 +36,25 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
}
}
// Otherwise, drop. On linux, we should never see these packets - Linux
-// routes packets from the nebula addr to the nebula addr through the loopback device.
+// routes packets from the nebula IP to the nebula IP through the loopback device.
return
}

// Ignore multicast packets
-if f.dropMulticast && fwPacket.RemoteAddr.IsMulticast() {
+if f.dropMulticast && fwPacket.RemoteIP.IsMulticast() {
return
}

-hostinfo, ready := f.getOrHandshakeConsiderRouting(fwPacket, func(hh *HandshakeHostInfo) {
+hostinfo, ready := f.getOrHandshake(fwPacket.RemoteIP, func(hh *HandshakeHostInfo) {
hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
})

if hostinfo == nil {
f.rejectInside(packet, out, q)
if f.l.Level >= logrus.DebugLevel {
-f.l.WithField("vpnAddr", fwPacket.RemoteAddr).
+f.l.WithField("vpnIp", fwPacket.RemoteIP).
WithField("fwPacket", fwPacket).
-Debugln("dropping outbound packet, vpnAddr not in our vpn networks or in unsafe networks")
+Debugln("dropping outbound packet, vpnIp not in our CIDR or in unsafe routes")
}
return
}
@ -120,93 +117,21 @@ func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *
f.sendNoMetrics(header.Message, 0, ci, hostinfo, netip.AddrPort{}, out, nb, packet, q)
}

-// Handshake will attempt to initiate a tunnel with the provided vpn address if it is within our vpn networks. This is a no-op if the tunnel is already established or being established
-func (f *Interface) Handshake(vpnAddr netip.Addr) {
-f.getOrHandshakeNoRouting(vpnAddr, nil)
+func (f *Interface) Handshake(vpnIp netip.Addr) {
+f.getOrHandshake(vpnIp, nil)
}

-// getOrHandshakeNoRouting returns nil if the vpnAddr is not routable.
+// getOrHandshake returns nil if the vpnIp is not routable.
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
-func (f *Interface) getOrHandshakeNoRouting(vpnAddr netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
-if f.myVpnNetworksTable.Contains(vpnAddr) {
-return f.handshakeManager.GetOrHandshake(vpnAddr, cacheCallback)
-}
-
-return nil, false
-}
-
-// getOrHandshakeConsiderRouting will try to find the HostInfo to handle this packet, starting a handshake if necessary.
-// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel.
-func (f *Interface) getOrHandshakeConsiderRouting(fwPacket *firewall.Packet, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
-
-destinationAddr := fwPacket.RemoteAddr
-
-hostinfo, ready := f.getOrHandshakeNoRouting(destinationAddr, cacheCallback)
-
-// Host is inside the mesh, no routing required
-if hostinfo != nil {
-return hostinfo, ready
-}
-
-gateways := f.inside.RoutesFor(destinationAddr)
-
-switch len(gateways) {
-case 0:
-return nil, false
-case 1:
-// Single gateway route
-return f.handshakeManager.GetOrHandshake(gateways[0].Addr(), cacheCallback)
-default:
-// Multi gateway route, perform ECMP categorization
-gatewayAddr, balancingOk := routing.BalancePacket(fwPacket, gateways)
-
-if !balancingOk {
-// This happens if the gateway buckets were not calculated, this _should_ never happen
-f.l.Error("Gateway buckets not calculated, fallback from ECMP to random routing. Please report this bug.")
-}
-
-var handshakeInfoForChosenGateway *HandshakeHostInfo
-var hhReceiver = func(hh *HandshakeHostInfo) {
-handshakeInfoForChosenGateway = hh
-}
-
-// Store the handshakeHostInfo for later.
-// If this node is not reachable we will attempt other nodes, if none are reachable we will
-// cache the packet for this gateway.
-if hostinfo, ready = f.handshakeManager.GetOrHandshake(gatewayAddr, hhReceiver); ready {
-return hostinfo, true
-}
-
-// It appears the selected gateway cannot be reached, find another gateway to fallback on.
-// The current implementation breaks ECMP but that seems better than no connectivity.
-// If ECMP is also required when a gateway is down then connectivity status
-// for each gateway needs to be kept and the weights recalculated when they go up or down.
-// This would also need to interact with unsafe_route updates through reloading the config or
-// use of the use_system_route_table option
-
-if f.l.Level >= logrus.DebugLevel {
-f.l.WithField("destination", destinationAddr).
-WithField("originalGateway", gatewayAddr).
-Debugln("Calculated gateway for ECMP not available, attempting other gateways")
-}
-
-for i := range gateways {
-// Skip the gateway that failed previously
-if gateways[i].Addr() == gatewayAddr {
-continue
-}
-
-// We do not need the HandshakeHostInfo since we cache the packet in the originally chosen gateway
-if hostinfo, ready = f.handshakeManager.GetOrHandshake(gateways[i].Addr(), nil); ready {
-return hostinfo, true
-}
-}
-
-// No gateways reachable, cache the packet in the originally chosen gateway
-cacheCallback(handshakeInfoForChosenGateway)
-return hostinfo, false
+func (f *Interface) getOrHandshake(vpnIp netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
+if !f.myVpnNet.Contains(vpnIp) {
+vpnIp = f.inside.RouteFor(vpnIp)
+if !vpnIp.IsValid() {
+return nil, false
+}
}

+return f.handshakeManager.GetOrHandshake(vpnIp, cacheCallback)
}

func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
@ -231,16 +156,16 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
f.sendNoMetrics(header.Message, st, hostinfo.ConnectionState, hostinfo, netip.AddrPort{}, p, nb, out, 0)
}

-// SendMessageToVpnAddr handles real addr:port lookup and sends to the current best known address for vpnAddr
-func (f *Interface) SendMessageToVpnAddr(t header.MessageType, st header.MessageSubType, vpnAddr netip.Addr, p, nb, out []byte) {
-hostInfo, ready := f.getOrHandshakeNoRouting(vpnAddr, func(hh *HandshakeHostInfo) {
+// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
+func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte) {
+hostInfo, ready := f.getOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
hh.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
})

if hostInfo == nil {
if f.l.Level >= logrus.DebugLevel {
-f.l.WithField("vpnAddr", vpnAddr).
-Debugln("dropping SendMessageToVpnAddr, vpnAddr not in our vpn networks or in unsafe routes")
+f.l.WithField("vpnIp", vpnIp).
+Debugln("dropping SendMessageToVpnIp, vpnIp not in our CIDR or in unsafe routes")
}
return
}
@ -333,6 +258,7 @@ func (f *Interface) SendVia(via *HostInfo,

func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote netip.AddrPort, p, nb, out []byte, q int) {
if ci.eKey == nil {
+//TODO: log warning
return
}
useRelay := !remote.IsValid() && !hostinfo.remote.IsValid()
@ -359,14 +285,14 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
f.connectionManager.Out(hostinfo)

// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
-// all our addrs and enable a faster roaming.
+// all our IPs and enable a faster roaming.
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
-f.lightHouse.QueryServer(hostinfo.vpnAddrs[0])
+f.lightHouse.QueryServer(hostinfo.vpnIp)
hostinfo.lastRebindCount = f.rebindCount
if f.l.Level >= logrus.DebugLevel {
-f.l.WithField("vpnAddrs", hostinfo.vpnAddrs).Debug("Lighthouse update triggered for punch due to rebind counter")
+f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")
}
}

@ -398,7 +324,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
} else {
// Try to send via a relay
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
-relayHostInfo, relay, err := f.hostMap.QueryVpnAddrsRelayFor(hostinfo.vpnAddrs, relayIP)
+relayHostInfo, relay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relayIP)
if err != nil {
hostinfo.relayState.DeleteRelay(relayIP)
hostinfo.logger(f.l).WithField("relay", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
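
The getOrHandshakeConsiderRouting comments above describe the ECMP behavior: hash the flow so a given flow keeps using the same gateway, weight gateways by configuration, and fall back to another gateway when the chosen one is unreachable. The sketch below only illustrates the hashing/weighting idea; it is not Nebula's routing.BalancePacket, and the gateway type, weights, and addresses are invented for the example.

// Illustrative weighted flow-hash gateway selection.
package main

import (
	"fmt"
	"hash/fnv"
	"net/netip"
)

type gateway struct {
	addr   netip.Addr
	weight uint32
}

// pickGateway hashes the source/destination pair and maps the result into
// weight-sized buckets so one flow consistently lands on one gateway.
func pickGateway(src, dst netip.Addr, gws []gateway) netip.Addr {
	if len(gws) == 0 {
		return netip.Addr{}
	}
	h := fnv.New32a()
	sb := src.As16()
	db := dst.As16()
	h.Write(sb[:])
	h.Write(db[:])

	var total uint32
	for _, g := range gws {
		total += g.weight
	}
	if total == 0 {
		return gws[0].addr
	}
	bucket := h.Sum32() % total
	for _, g := range gws {
		if bucket < g.weight {
			return g.addr
		}
		bucket -= g.weight
	}
	return gws[len(gws)-1].addr
}

func main() {
	gws := []gateway{
		{netip.MustParseAddr("192.168.100.1"), 10},
		{netip.MustParseAddr("192.168.100.2"), 20},
	}
	src := netip.MustParseAddr("10.0.0.5")
	dst := netip.MustParseAddr("10.99.0.7")
	fmt.Println(pickGateway(src, dst, gws))
}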
162
interface.go
162
interface.go
@ -2,6 +2,7 @@ package nebula
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@ -11,7 +12,6 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/gaissmai/bart"
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/slackhq/nebula/config"
|
"github.com/slackhq/nebula/config"
|
||||||
@ -51,27 +51,25 @@ type InterfaceConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Interface struct {
|
type Interface struct {
|
||||||
hostMap *HostMap
|
hostMap *HostMap
|
||||||
outside udp.Conn
|
outside udp.Conn
|
||||||
inside overlay.Device
|
inside overlay.Device
|
||||||
pki *PKI
|
pki *PKI
|
||||||
firewall *Firewall
|
cipher string
|
||||||
connectionManager *connectionManager
|
firewall *Firewall
|
||||||
handshakeManager *HandshakeManager
|
connectionManager *connectionManager
|
||||||
serveDns bool
|
handshakeManager *HandshakeManager
|
||||||
createTime time.Time
|
serveDns bool
|
||||||
lightHouse *LightHouse
|
createTime time.Time
|
||||||
myBroadcastAddrsTable *bart.Lite
|
lightHouse *LightHouse
|
||||||
myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate
|
myBroadcastAddr netip.Addr
|
||||||
myVpnAddrsTable *bart.Lite
|
myVpnNet netip.Prefix
|
||||||
myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate
|
dropLocalBroadcast bool
|
||||||
myVpnNetworksTable *bart.Lite
|
dropMulticast bool
|
||||||
dropLocalBroadcast bool
|
routines int
|
||||||
dropMulticast bool
|
disconnectInvalid atomic.Bool
|
||||||
routines int
|
closed atomic.Bool
|
||||||
disconnectInvalid atomic.Bool
|
relayManager *relayManager
|
||||||
closed atomic.Bool
|
|
||||||
relayManager *relayManager
|
|
||||||
|
|
||||||
tryPromoteEvery atomic.Uint32
|
tryPromoteEvery atomic.Uint32
|
||||||
reQueryEvery atomic.Uint32
|
reQueryEvery atomic.Uint32
|
||||||
@ -103,11 +101,9 @@ type EncWriter interface {
|
|||||||
out []byte,
|
out []byte,
|
||||||
nocopy bool,
|
nocopy bool,
|
||||||
)
|
)
|
||||||
SendMessageToVpnAddr(t header.MessageType, st header.MessageSubType, vpnAddr netip.Addr, p, nb, out []byte)
|
SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte)
|
||||||
SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte)
|
SendMessageToHostInfo(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte)
|
||||||
Handshake(vpnAddr netip.Addr)
|
Handshake(vpnIp netip.Addr)
|
||||||
GetHostInfo(vpnAddr netip.Addr) *HostInfo
|
|
||||||
GetCertState() *CertState
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type sendRecvErrorConfig uint8
|
type sendRecvErrorConfig uint8
|
||||||
@ -118,10 +114,10 @@ const (
|
|||||||
sendRecvErrorPrivate
|
sendRecvErrorPrivate
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s sendRecvErrorConfig) ShouldSendRecvError(endpoint netip.AddrPort) bool {
|
func (s sendRecvErrorConfig) ShouldSendRecvError(ip netip.AddrPort) bool {
|
||||||
switch s {
|
switch s {
|
||||||
case sendRecvErrorPrivate:
|
case sendRecvErrorPrivate:
|
||||||
return endpoint.Addr().IsPrivate()
|
return ip.Addr().IsPrivate()
|
||||||
case sendRecvErrorAlways:
|
case sendRecvErrorAlways:
|
||||||
return true
|
return true
|
||||||
case sendRecvErrorNever:
|
case sendRecvErrorNever:
|
||||||
@ -161,30 +157,49 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
|||||||
return nil, errors.New("no connection manager")
|
return nil, errors.New("no connection manager")
|
||||||
}
|
}
|
||||||
|
|
||||||
cs := c.pki.getCertState()
|
certificate := c.pki.GetCertState().Certificate
|
||||||
|
|
||||||
|
myVpnAddr, ok := netip.AddrFromSlice(certificate.Details.Ips[0].IP)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid ip address in certificate: %s", certificate.Details.Ips[0].IP)
|
||||||
|
}
|
||||||
|
|
||||||
|
myVpnMask, ok := netip.AddrFromSlice(certificate.Details.Ips[0].Mask)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("invalid ip mask in certificate: %s", certificate.Details.Ips[0].Mask)
|
||||||
|
}
|
||||||
|
|
||||||
|
myVpnAddr = myVpnAddr.Unmap()
|
||||||
|
myVpnMask = myVpnMask.Unmap()
|
||||||
|
|
||||||
|
if myVpnAddr.BitLen() != myVpnMask.BitLen() {
|
||||||
|
return nil, fmt.Errorf("ip address and mask are different lengths in certificate")
|
||||||
|
}
|
||||||
|
|
||||||
|
ones, _ := certificate.Details.Ips[0].Mask.Size()
|
||||||
|
myVpnNet := netip.PrefixFrom(myVpnAddr, ones)
|
||||||
|
|
||||||
ifce := &Interface{
|
ifce := &Interface{
|
||||||
pki: c.pki,
|
pki: c.pki,
|
||||||
hostMap: c.HostMap,
|
hostMap: c.HostMap,
|
||||||
outside: c.Outside,
|
outside: c.Outside,
|
||||||
inside: c.Inside,
|
inside: c.Inside,
|
||||||
firewall: c.Firewall,
|
cipher: c.Cipher,
|
||||||
serveDns: c.ServeDns,
|
firewall: c.Firewall,
|
||||||
handshakeManager: c.HandshakeManager,
|
serveDns: c.ServeDns,
|
||||||
createTime: time.Now(),
|
handshakeManager: c.HandshakeManager,
|
||||||
lightHouse: c.lightHouse,
|
createTime: time.Now(),
|
||||||
dropLocalBroadcast: c.DropLocalBroadcast,
|
lightHouse: c.lightHouse,
|
||||||
dropMulticast: c.DropMulticast,
|
dropLocalBroadcast: c.DropLocalBroadcast,
|
||||||
routines: c.routines,
|
dropMulticast: c.DropMulticast,
|
||||||
version: c.version,
|
routines: c.routines,
|
||||||
writers: make([]udp.Conn, c.routines),
|
version: c.version,
|
||||||
readers: make([]io.ReadWriteCloser, c.routines),
|
writers: make([]udp.Conn, c.routines),
|
||||||
myVpnNetworks: cs.myVpnNetworks,
|
readers: make([]io.ReadWriteCloser, c.routines),
|
||||||
myVpnNetworksTable: cs.myVpnNetworksTable,
|
myVpnNet: myVpnNet,
|
||||||
myVpnAddrs: cs.myVpnAddrs,
|
relayManager: c.relayManager,
|
||||||
myVpnAddrsTable: cs.myVpnAddrsTable,
|
connectionManager: c.connectionManager,
|
||||||
myBroadcastAddrsTable: cs.myVpnBroadcastAddrsTable,
|
|
||||||
relayManager: c.relayManager,
|
|
||||||
connectionManager: c.connectionManager,
|
|
||||||
conntrackCacheTimeout: c.ConntrackCacheTimeout,
|
conntrackCacheTimeout: c.ConntrackCacheTimeout,
|
||||||
|
|
||||||
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
||||||
@ -197,6 +212,12 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
|||||||
l: c.l,
|
l: c.l,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if myVpnAddr.Is4() {
|
||||||
|
addr := myVpnNet.Masked().Addr().As4()
|
||||||
|
binary.BigEndian.PutUint32(addr[:], binary.BigEndian.Uint32(addr[:])|^binary.BigEndian.Uint32(certificate.Details.Ips[0].Mask))
|
||||||
|
ifce.myBroadcastAddr = netip.AddrFrom4(addr)
|
||||||
|
}
|
||||||
|
|
||||||
ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
|
ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
|
||||||
ifce.reQueryEvery.Store(c.reQueryEvery)
|
ifce.reQueryEvery.Store(c.reQueryEvery)
|
||||||
ifce.reQueryWait.Store(int64(c.reQueryWait))
|
ifce.reQueryWait.Store(int64(c.reQueryWait))
|
||||||
@ -217,7 +238,7 @@ func (f *Interface) activate() {
|
|||||||
f.l.WithError(err).Error("Failed to get udp listen address")
|
f.l.WithError(err).Error("Failed to get udp listen address")
|
||||||
}
|
}
|
||||||
|
|
||||||
f.l.WithField("interface", f.inside.Name()).WithField("networks", f.myVpnNetworks).
|
f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()).
|
||||||
WithField("build", f.version).WithField("udpAddr", addr).
|
WithField("build", f.version).WithField("udpAddr", addr).
|
||||||
WithField("boringcrypto", boringEnabled()).
|
WithField("boringcrypto", boringEnabled()).
|
||||||
Info("Nebula interface is active")
|
Info("Nebula interface is active")
|
||||||
@ -258,22 +279,16 @@ func (f *Interface) listenOut(i int) {
|
|||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
|
|
||||||
var li udp.Conn
|
var li udp.Conn
|
||||||
|
// TODO clean this up with a coherent interface for each outside connection
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
li = f.writers[i]
|
li = f.writers[i]
|
||||||
} else {
|
} else {
|
||||||
li = f.outside
|
li = f.outside
|
||||||
}
|
}
|
||||||
|
|
||||||
ctCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
|
||||||
lhh := f.lightHouse.NewRequestHandler()
|
lhh := f.lightHouse.NewRequestHandler()
|
||||||
plaintext := make([]byte, udp.MTU)
|
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||||
h := &header.H{}
|
li.ListenOut(readOutsidePackets(f), lhHandleRequest(lhh, f), conntrackCache, i)
|
||||||
fwPacket := &firewall.Packet{}
|
|
||||||
nb := make([]byte, 12, 12)
|
|
||||||
|
|
||||||
li.ListenOut(func(fromUdpAddr netip.AddrPort, payload []byte) {
|
|
||||||
f.readOutsidePackets(fromUdpAddr, nil, plaintext[:0], payload, h, fwPacket, lhh, nb, i, ctCache.Get(f.l))
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
||||||
@@ -330,7 +345,7 @@ func (f *Interface) reloadFirewall(c *config.C) {
 return
 }

-fw, err := NewFirewallFromConfig(f.l, f.pki.getCertState(), c)
+fw, err := NewFirewallFromConfig(f.l, f.pki.GetCertState().Certificate, c)
 if err != nil {
 f.l.WithError(err).Error("Error while creating firewall during reload")
 return
@@ -413,8 +428,6 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
 udpStats := udp.NewUDPStatsEmitter(f.writers)

 certExpirationGauge := metrics.GetOrRegisterGauge("certificate.ttl_seconds", nil)
-certInitiatingVersion := metrics.GetOrRegisterGauge("certificate.initiating_version", nil)
-certMaxVersion := metrics.GetOrRegisterGauge("certificate.max_version", nil)

 for {
 select {
@@ -424,30 +437,11 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
 f.firewall.EmitStats()
 f.handshakeManager.EmitStats()
 udpStats()
-certState := f.pki.getCertState()
-defaultCrt := certState.GetDefaultCertificate()
-certExpirationGauge.Update(int64(defaultCrt.NotAfter().Sub(time.Now()) / time.Second))
-certInitiatingVersion.Update(int64(defaultCrt.Version()))
-
-// Report the max certificate version we are capable of using
-if certState.v2Cert != nil {
-certMaxVersion.Update(int64(certState.v2Cert.Version()))
-} else {
-certMaxVersion.Update(int64(certState.v1Cert.Version()))
-}
+certExpirationGauge.Update(int64(f.pki.GetCertState().Certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
 }
 }
 }

-func (f *Interface) GetHostInfo(vpnIp netip.Addr) *HostInfo {
-return f.hostMap.QueryVpnAddr(vpnIp)
-}
-
-func (f *Interface) GetCertState() *CertState {
-return f.pki.getCertState()
-}
-
 func (f *Interface) Close() error {
 f.closed.Store(true)

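The left-hand side registers additional gauges for the certificate versions in use. As a reference point, here is a minimal sketch of the gauge pattern used in this loop, written against the upstream go-metrics library these calls come from; the expiry time and version value are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// GetOrRegisterGauge registers the gauge on first use and returns the
	// existing one afterwards, so it is safe to call from a periodic loop.
	ttl := metrics.GetOrRegisterGauge("certificate.ttl_seconds", nil)
	initiating := metrics.GetOrRegisterGauge("certificate.initiating_version", nil)

	notAfter := time.Now().Add(24 * time.Hour) // placeholder expiry
	ttl.Update(int64(time.Until(notAfter) / time.Second))
	initiating.Update(2) // placeholder certificate version

	fmt.Println(ttl.Value(), initiating.Value())
}
```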
@@ -6,6 +6,8 @@ import (
 "golang.org/x/net/ipv4"
 )

+//TODO: IPV6-WORK can probably delete this
+
 const (
 // Need 96 bytes for the largest reject packet:
 // - 20 byte ipv4 header
lighthouse.go (923 lines changed) — file diff suppressed because it is too large
@@ -7,146 +7,149 @@ import (
 "net/netip"
 "testing"

-"github.com/gaissmai/bart"
-"github.com/slackhq/nebula/cert"
 "github.com/slackhq/nebula/config"
 "github.com/slackhq/nebula/header"
 "github.com/slackhq/nebula/test"
 "github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"
 )

+//TODO: Add a test to ensure udpAddr is copied and not reused
+
 func TestOldIPv4Only(t *testing.T) {
 // This test ensures our new ipv6 enabled LH protobuf IpAndPorts works with the old style to enable backwards compatibility
 b := []byte{8, 129, 130, 132, 80, 16, 10}
-var m V4AddrPort
+var m Ip4AndPort
 err := m.Unmarshal(b)
-require.NoError(t, err)
+assert.NoError(t, err)
 ip := netip.MustParseAddr("10.1.1.1")
 bp := ip.As4()
-assert.Equal(t, binary.BigEndian.Uint32(bp[:]), m.GetAddr())
+assert.Equal(t, binary.BigEndian.Uint32(bp[:]), m.GetIp())
+}

+func TestNewLhQuery(t *testing.T) {
+myIp, err := netip.ParseAddr("192.1.1.1")
+assert.NoError(t, err)
+
+// Generating a new lh query should work
+a := NewLhQueryByInt(myIp)
+
+// The result should be a nebulameta protobuf
+assert.IsType(t, &NebulaMeta{}, a)
+
+// It should also Marshal fine
+b, err := a.Marshal()
+assert.Nil(t, err)
+
+// and then Unmarshal fine
+n := &NebulaMeta{}
+err = n.Unmarshal(b)
+assert.Nil(t, err)
+
 }

 func Test_lhStaticMapping(t *testing.T) {
 l := test.NewLogger()
 myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
-nt := new(bart.Lite)
-nt.Insert(myVpnNet)
-cs := &CertState{
-myVpnNetworks: []netip.Prefix{myVpnNet},
-myVpnNetworksTable: nt,
-}
 lh1 := "10.128.0.2"

 c := config.NewC(l)
-c.Settings["lighthouse"] = map[string]any{"hosts": []any{lh1}}
-c.Settings["static_host_map"] = map[string]any{lh1: []any{"1.1.1.1:4242"}}
-_, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
-require.NoError(t, err)
+c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
+c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
+_, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
+assert.Nil(t, err)

 lh2 := "10.128.0.3"
 c = config.NewC(l)
-c.Settings["lighthouse"] = map[string]any{"hosts": []any{lh1, lh2}}
-c.Settings["static_host_map"] = map[string]any{lh1: []any{"100.1.1.1:4242"}}
-_, err = NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
-require.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
+c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
+c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
+_, err = NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
+assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
 }

 func TestReloadLighthouseInterval(t *testing.T) {
 l := test.NewLogger()
 myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
-nt := new(bart.Lite)
-nt.Insert(myVpnNet)
-cs := &CertState{
-myVpnNetworks: []netip.Prefix{myVpnNet},
-myVpnNetworksTable: nt,
-}
 lh1 := "10.128.0.2"

 c := config.NewC(l)
-c.Settings["lighthouse"] = map[string]any{
-"hosts": []any{lh1},
+c.Settings["lighthouse"] = map[interface{}]interface{}{
+"hosts": []interface{}{lh1},
 "interval": "1s",
 }

-c.Settings["static_host_map"] = map[string]any{lh1: []any{"1.1.1.1:4242"}}
-lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
-require.NoError(t, err)
+c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
+lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
+assert.NoError(t, err)
 lh.ifce = &mockEncWriter{}

 // The first one routine is kicked off by main.go currently, lets make sure that one dies
-require.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 5"))
+assert.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 5"))
 assert.Equal(t, int64(5), lh.interval.Load())

 // Subsequent calls are killed off by the LightHouse.Reload function
-require.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 10"))
+assert.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 10"))
 assert.Equal(t, int64(10), lh.interval.Load())

 // If this completes then nothing is stealing our reload routine
-require.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 11"))
+assert.NoError(t, c.ReloadConfigString("lighthouse:\n interval: 11"))
 assert.Equal(t, int64(11), lh.interval.Load())
 }

 func BenchmarkLighthouseHandleRequest(b *testing.B) {
 l := test.NewLogger()
 myVpnNet := netip.MustParsePrefix("10.128.0.1/0")
-nt := new(bart.Lite)
-nt.Insert(myVpnNet)
-cs := &CertState{
-myVpnNetworks: []netip.Prefix{myVpnNet},
-myVpnNetworksTable: nt,
-}

 c := config.NewC(l)
-lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
-require.NoError(b, err)
+lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
+if !assert.NoError(b, err) {
+b.Fatal()
+}

 hAddr := netip.MustParseAddrPort("4.5.6.7:12345")
 hAddr2 := netip.MustParseAddrPort("4.5.6.7:12346")

 vpnIp3 := netip.MustParseAddr("0.0.0.3")
-lh.addrMap[vpnIp3] = NewRemoteList([]netip.Addr{vpnIp3}, nil)
+lh.addrMap[vpnIp3] = NewRemoteList(nil)
 lh.addrMap[vpnIp3].unlockedSetV4(
 vpnIp3,
 vpnIp3,
-[]*V4AddrPort{
-netAddrToProtoV4AddrPort(hAddr.Addr(), hAddr.Port()),
-netAddrToProtoV4AddrPort(hAddr2.Addr(), hAddr2.Port()),
+[]*Ip4AndPort{
+NewIp4AndPortFromNetIP(hAddr.Addr(), hAddr.Port()),
+NewIp4AndPortFromNetIP(hAddr2.Addr(), hAddr2.Port()),
 },
-func(netip.Addr, *V4AddrPort) bool { return true },
+func(netip.Addr, *Ip4AndPort) bool { return true },
 )

 rAddr := netip.MustParseAddrPort("1.2.2.3:12345")
 rAddr2 := netip.MustParseAddrPort("1.2.2.3:12346")
 vpnIp2 := netip.MustParseAddr("0.0.0.3")
-lh.addrMap[vpnIp2] = NewRemoteList([]netip.Addr{vpnIp2}, nil)
+lh.addrMap[vpnIp2] = NewRemoteList(nil)
 lh.addrMap[vpnIp2].unlockedSetV4(
 vpnIp3,
 vpnIp3,
-[]*V4AddrPort{
-netAddrToProtoV4AddrPort(rAddr.Addr(), rAddr.Port()),
-netAddrToProtoV4AddrPort(rAddr2.Addr(), rAddr2.Port()),
+[]*Ip4AndPort{
+NewIp4AndPortFromNetIP(rAddr.Addr(), rAddr.Port()),
+NewIp4AndPortFromNetIP(rAddr2.Addr(), rAddr2.Port()),
 },
-func(netip.Addr, *V4AddrPort) bool { return true },
+func(netip.Addr, *Ip4AndPort) bool { return true },
 )

 mw := &mockEncWriter{}

-hi := []netip.Addr{vpnIp2}
 b.Run("notfound", func(b *testing.B) {
 lhh := lh.NewRequestHandler()
 req := &NebulaMeta{
 Type: NebulaMeta_HostQuery,
 Details: &NebulaMetaDetails{
-OldVpnAddr: 4,
-V4AddrPorts: nil,
+VpnIp: 4,
+Ip4AndPorts: nil,
 },
 }
 p, err := req.Marshal()
-require.NoError(b, err)
+assert.NoError(b, err)
 for n := 0; n < b.N; n++ {
-lhh.HandleRequest(rAddr, hi, p, mw)
+lhh.HandleRequest(rAddr, vpnIp2, p, mw)
 }
 })
 b.Run("found", func(b *testing.B) {
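Both the old Ip4AndPort and the new V4AddrPort messages carry an IPv4 address as a single big-endian uint32, which is what TestOldIPv4Only asserts. A standalone sketch of that packing, using only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

func main() {
	// 10.1.1.1 packs to the big-endian uint32 0x0a010101 (167837953),
	// matching the value the test decodes from the legacy protobuf bytes.
	ip := netip.MustParseAddr("10.1.1.1")
	b := ip.As4()
	packed := binary.BigEndian.Uint32(b[:])

	// Round-trip back to a netip.Addr.
	var out [4]byte
	binary.BigEndian.PutUint32(out[:], packed)
	fmt.Println(packed, netip.AddrFrom4(out)) // 167837953 10.1.1.1
}
```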
@@ -154,15 +157,15 @@ func BenchmarkLighthouseHandleRequest(b *testing.B) {
 req := &NebulaMeta{
 Type: NebulaMeta_HostQuery,
 Details: &NebulaMetaDetails{
-OldVpnAddr: 3,
-V4AddrPorts: nil,
+VpnIp: 3,
+Ip4AndPorts: nil,
 },
 }
 p, err := req.Marshal()
-require.NoError(b, err)
+assert.NoError(b, err)

 for n := 0; n < b.N; n++ {
-lhh.HandleRequest(rAddr, hi, p, mw)
+lhh.HandleRequest(rAddr, vpnIp2, p, mw)
 }
 })
 }
@@ -192,51 +195,42 @@ func TestLighthouse_Memory(t *testing.T) {
 theirVpnIp := netip.MustParseAddr("10.128.0.3")

 c := config.NewC(l)
-c.Settings["lighthouse"] = map[string]any{"am_lighthouse": true}
-c.Settings["listen"] = map[string]any{"port": 4242}
-myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
-nt := new(bart.Lite)
-nt.Insert(myVpnNet)
-cs := &CertState{
-myVpnNetworks: []netip.Prefix{myVpnNet},
-myVpnNetworksTable: nt,
-}
-lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
-lh.ifce = &mockEncWriter{}
-require.NoError(t, err)
+c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
+c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
+lh, err := NewLightHouseFromConfig(context.Background(), l, c, netip.MustParsePrefix("10.128.0.1/24"), nil, nil)
+assert.NoError(t, err)
 lhh := lh.NewRequestHandler()

 // Test that my first update responds with just that
 newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{myUdpAddr1, myUdpAddr2}, lhh)
 r := newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, myUdpAddr1, myUdpAddr2)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2)

 // Ensure we don't accumulate addresses
 newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{myUdpAddr3}, lhh)
 r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, myUdpAddr3)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr3)

 // Grow it back to 2
 newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{myUdpAddr1, myUdpAddr4}, lhh)
 r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, myUdpAddr1, myUdpAddr4)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)

 // Update a different host and ask about it
 newLHHostUpdate(theirUdpAddr0, theirVpnIp, []netip.AddrPort{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh)
 r = newLHHostRequest(theirUdpAddr0, theirVpnIp, theirVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)

 // Have both hosts ask about the other
 r = newLHHostRequest(theirUdpAddr0, theirVpnIp, myVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, myUdpAddr1, myUdpAddr4)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)

 r = newLHHostRequest(myUdpAddr0, myVpnIp, theirVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)

 // Make sure we didn't get changed
 r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, myUdpAddr1, myUdpAddr4)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)

 // Ensure proper ordering and limiting
 // Send 12 addrs, get 10 back, the last 2 removed, allowing the duplicate to remain (clients dedupe)
@@ -261,7 +255,7 @@ func TestLighthouse_Memory(t *testing.T) {
 r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
 assertIp4InArray(
 t,
-r.msg.Details.V4AddrPorts,
+r.msg.Details.Ip4AndPorts,
 myUdpAddr1, myUdpAddr2, myUdpAddr3, myUdpAddr4, myUdpAddr5, myUdpAddr5, myUdpAddr6, myUdpAddr7, myUdpAddr8, myUdpAddr9,
 )

@@ -271,50 +265,38 @@ func TestLighthouse_Memory(t *testing.T) {
 good := netip.MustParseAddrPort("1.128.0.99:4242")
 newLHHostUpdate(myUdpAddr0, myVpnIp, []netip.AddrPort{bad1, bad2, good}, lhh)
 r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
-assertIp4InArray(t, r.msg.Details.V4AddrPorts, good)
+assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good)
 }

 func TestLighthouse_reload(t *testing.T) {
 l := test.NewLogger()
 c := config.NewC(l)
-c.Settings["lighthouse"] = map[string]any{"am_lighthouse": true}
-c.Settings["listen"] = map[string]any{"port": 4242}
-myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
-nt := new(bart.Lite)
-nt.Insert(myVpnNet)
-cs := &CertState{
-myVpnNetworks: []netip.Prefix{myVpnNet},
-myVpnNetworksTable: nt,
-}
-
-lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
-require.NoError(t, err)
-
-nc := map[string]any{
-"static_host_map": map[string]any{
-"10.128.0.2": []any{"1.1.1.1:4242"},
+c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
+c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
+lh, err := NewLightHouseFromConfig(context.Background(), l, c, netip.MustParsePrefix("10.128.0.1/24"), nil, nil)
+assert.NoError(t, err)
+
+nc := map[interface{}]interface{}{
+"static_host_map": map[interface{}]interface{}{
+"10.128.0.2": []interface{}{"1.1.1.1:4242"},
 },
 }
 rc, err := yaml.Marshal(nc)
-require.NoError(t, err)
+assert.NoError(t, err)
 c.ReloadConfigString(string(rc))

 err = lh.reload(c, false)
-require.NoError(t, err)
+assert.NoError(t, err)
 }

 func newLHHostRequest(fromAddr netip.AddrPort, myVpnIp, queryVpnIp netip.Addr, lhh *LightHouseHandler) testLhReply {
+//TODO: IPV6-WORK
+bip := queryVpnIp.As4()
 req := &NebulaMeta{
 Type: NebulaMeta_HostQuery,
-Details: &NebulaMetaDetails{},
-}
-
-if queryVpnIp.Is4() {
-bip := queryVpnIp.As4()
-req.Details.OldVpnAddr = binary.BigEndian.Uint32(bip[:])
-} else {
-req.Details.VpnAddr = netAddrToProtoAddr(queryVpnIp)
+Details: &NebulaMetaDetails{
+VpnIp: binary.BigEndian.Uint32(bip[:]),
+},
 }

 b, err := req.Marshal()
@@ -326,29 +308,23 @@ func newLHHostRequest(fromAddr netip.AddrPort, myVpnIp, queryVpnIp netip.Addr, l
 w := &testEncWriter{
 metaFilter: &filter,
 }
-lhh.HandleRequest(fromAddr, []netip.Addr{myVpnIp}, b, w)
+lhh.HandleRequest(fromAddr, myVpnIp, b, w)
 return w.lastReply
 }

 func newLHHostUpdate(fromAddr netip.AddrPort, vpnIp netip.Addr, addrs []netip.AddrPort, lhh *LightHouseHandler) {
+//TODO: IPV6-WORK
+bip := vpnIp.As4()
 req := &NebulaMeta{
 Type: NebulaMeta_HostUpdateNotification,
-Details: &NebulaMetaDetails{},
+Details: &NebulaMetaDetails{
+VpnIp: binary.BigEndian.Uint32(bip[:]),
+Ip4AndPorts: make([]*Ip4AndPort, len(addrs)),
+},
 }

-if vpnIp.Is4() {
-bip := vpnIp.As4()
-req.Details.OldVpnAddr = binary.BigEndian.Uint32(bip[:])
-} else {
-req.Details.VpnAddr = netAddrToProtoAddr(vpnIp)
-}
-
-for _, v := range addrs {
-if v.Addr().Is4() {
-req.Details.V4AddrPorts = append(req.Details.V4AddrPorts, netAddrToProtoV4AddrPort(v.Addr(), v.Port()))
-} else {
-req.Details.V6AddrPorts = append(req.Details.V6AddrPorts, netAddrToProtoV6AddrPort(v.Addr(), v.Port()))
-}
+for k, v := range addrs {
+req.Details.Ip4AndPorts[k] = NewIp4AndPortFromNetIP(v.Addr(), v.Port())
 }

 b, err := req.Marshal()
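For context, here is a hedged sketch of building and marshaling a lighthouse host-update message in the newer shape shown on the left-hand side of this diff. The message and field names (NebulaMeta, NebulaMetaDetails, OldVpnAddr, V4AddrPorts with Addr/Port, and the generated Marshal method) are taken from the test code and nebula.proto above, but the import path and exact generated identifiers are assumptions that depend on the nebula version in use; on the older side the equivalent fields are VpnIp and Ip4AndPorts.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"

	nebula "github.com/slackhq/nebula"
)

func main() {
	vpnIp := netip.MustParseAddr("10.128.0.2")           // example overlay address
	under := netip.MustParseAddrPort("192.0.2.10:4242")  // example underlay address

	bip := vpnIp.As4()
	bu := under.Addr().As4()

	// Mirrors the newer-style update built by newLHHostUpdate above.
	req := &nebula.NebulaMeta{
		Type: nebula.NebulaMeta_HostUpdateNotification,
		Details: &nebula.NebulaMetaDetails{
			OldVpnAddr: binary.BigEndian.Uint32(bip[:]),
			V4AddrPorts: []*nebula.V4AddrPort{
				{Addr: binary.BigEndian.Uint32(bu[:]), Port: uint32(under.Port())},
			},
		},
	}

	b, err := req.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes on the wire\n", len(b))
}
```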
@@ -357,9 +333,75 @@ func newLHHostUpdate(fromAddr netip.AddrPort, vpnIp netip.Addr, addrs []netip.Ad
 }

 w := &testEncWriter{}
-lhh.HandleRequest(fromAddr, []netip.Addr{vpnIp}, b, w)
+lhh.HandleRequest(fromAddr, vpnIp, b, w)
 }

+//TODO: this is a RemoteList test
+//func Test_lhRemoteAllowList(t *testing.T) {
+// l := NewLogger()
+// c := NewConfig(l)
+// c.Settings["remoteallowlist"] = map[interface{}]interface{}{
+// "10.20.0.0/12": false,
+// }
+// allowList, err := c.GetAllowList("remoteallowlist", false)
+// assert.Nil(t, err)
+//
+// lh1 := "10.128.0.2"
+// lh1IP := net.ParseIP(lh1)
+//
+// udpServer, _ := NewListener(l, "0.0.0.0", 0, true)
+//
+// lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, []uint32{ip2int(lh1IP)}, 10, 10003, udpServer, false, 1, false)
+// lh.SetRemoteAllowList(allowList)
+//
+// // A disallowed ip should not enter the cache but we should end up with an empty entry in the addrMap
+// remote1IP := net.ParseIP("10.20.0.3")
+// remotes := lh.unlockedGetRemoteList(ip2int(remote1IP))
+// remotes.unlockedPrependV4(ip2int(remote1IP), NewIp4AndPort(remote1IP, 4242))
+// assert.NotNil(t, lh.addrMap[ip2int(remote1IP)])
+// assert.Empty(t, lh.addrMap[ip2int(remote1IP)].CopyAddrs([]*net.IPNet{}))
+//
+// // Make sure a good ip enters the cache and addrMap
+// remote2IP := net.ParseIP("10.128.0.3")
+// remote2UDPAddr := NewUDPAddr(remote2IP, uint16(4242))
+// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote2UDPAddr.IP, uint32(remote2UDPAddr.Port)), false, false)
+// assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr)
+//
+// // Another good ip gets into the cache, ordering is inverted
+// remote3IP := net.ParseIP("10.128.0.4")
+// remote3UDPAddr := NewUDPAddr(remote3IP, uint16(4243))
+// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote3UDPAddr.IP, uint32(remote3UDPAddr.Port)), false, false)
+// assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr, remote3UDPAddr)
+//
+// // If we exceed the length limit we should only have the most recent addresses
+// addedAddrs := []*udpAddr{}
+// for i := 0; i < 11; i++ {
+// remoteUDPAddr := NewUDPAddr(net.IP{10, 128, 0, 4}, uint16(4243+i))
+// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remoteUDPAddr.IP, uint32(remoteUDPAddr.Port)), false, false)
+// // The first entry here is a duplicate, don't add it to the assert list
+// if i != 0 {
+// addedAddrs = append(addedAddrs, remoteUDPAddr)
+// }
+// }
+//
+// // We should only have the last 10 of what we tried to add
+// assert.True(t, len(addedAddrs) >= 10, "We should have tried to add at least 10 addresses")
+// assertUdpAddrInArray(
+// t,
+// lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}),
+// addedAddrs[0],
+// addedAddrs[1],
+// addedAddrs[2],
+// addedAddrs[3],
+// addedAddrs[4],
+// addedAddrs[5],
+// addedAddrs[6],
+// addedAddrs[7],
+// addedAddrs[8],
+// addedAddrs[9],
+// )
+//}

 type testLhReply struct {
 nebType header.MessageType
 nebSubType header.MessageSubType
@@ -368,9 +410,8 @@ type testLhReply struct {
 }

 type testEncWriter struct {
 lastReply testLhReply
 metaFilter *NebulaMeta_MessageType
-protocolVersion cert.Version
 }

 func (tw *testEncWriter) SendVia(via *HostInfo, relay *Relay, ad, nb, out []byte, nocopy bool) {
@@ -385,7 +426,7 @@ func (tw *testEncWriter) SendMessageToHostInfo(t header.MessageType, st header.M
 tw.lastReply = testLhReply{
 nebType: t,
 nebSubType: st,
-vpnIp: hostinfo.vpnAddrs[0],
+vpnIp: hostinfo.vpnIp,
 msg: msg,
 }
 }
@@ -395,7 +436,7 @@ func (tw *testEncWriter) SendMessageToHostInfo(t header.MessageType, st header.M
 }
 }

-func (tw *testEncWriter) SendMessageToVpnAddr(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, _, _ []byte) {
+func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, _, _ []byte) {
 msg := &NebulaMeta{}
 err := msg.Unmarshal(p)
 if tw.metaFilter == nil || msg.Type == *tw.metaFilter {
@@ -412,84 +453,17 @@ func (tw *testEncWriter) SendMessageToVpnAddr(t header.MessageType, st header.Me
 }
 }

-func (tw *testEncWriter) GetHostInfo(vpnIp netip.Addr) *HostInfo {
-return nil
-}
-
-func (tw *testEncWriter) GetCertState() *CertState {
-return &CertState{initiatingVersion: tw.protocolVersion}
-}
-
 // assertIp4InArray asserts every address in want is at the same position in have and that the lengths match
-func assertIp4InArray(t *testing.T, have []*V4AddrPort, want ...netip.AddrPort) {
+func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...netip.AddrPort) {
 if !assert.Len(t, have, len(want)) {
 return
 }

 for k, w := range want {
-h := protoV4AddrPortToNetAddrPort(have[k])
+//TODO: IPV6-WORK
+h := AddrPortFromIp4AndPort(have[k])
 if !(h == w) {
 assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v, found %v", w, k, h))
 }
 }
 }

-func Test_findNetworkUnion(t *testing.T) {
-var out netip.Addr
-var ok bool
-
-tenDot := netip.MustParsePrefix("10.0.0.0/8")
-oneSevenTwo := netip.MustParsePrefix("172.16.0.0/16")
-fe80 := netip.MustParsePrefix("fe80::/8")
-fc00 := netip.MustParsePrefix("fc00::/7")
-
-a1 := netip.MustParseAddr("10.0.0.1")
-afe81 := netip.MustParseAddr("fe80::1")
-
-//simple
-out, ok = findNetworkUnion([]netip.Prefix{tenDot}, []netip.Addr{a1})
-assert.True(t, ok)
-assert.Equal(t, out, a1)
-
-//mixed lengths
-out, ok = findNetworkUnion([]netip.Prefix{tenDot}, []netip.Addr{a1, afe81})
-assert.True(t, ok)
-assert.Equal(t, out, a1)
-out, ok = findNetworkUnion([]netip.Prefix{tenDot, oneSevenTwo}, []netip.Addr{a1})
-assert.True(t, ok)
-assert.Equal(t, out, a1)
-
-//mixed family
-out, ok = findNetworkUnion([]netip.Prefix{tenDot, oneSevenTwo, fe80}, []netip.Addr{a1})
-assert.True(t, ok)
-assert.Equal(t, out, a1)
-out, ok = findNetworkUnion([]netip.Prefix{tenDot, oneSevenTwo, fe80}, []netip.Addr{a1, afe81})
-assert.True(t, ok)
-assert.Equal(t, out, a1)
-
-//ordering
-out, ok = findNetworkUnion([]netip.Prefix{tenDot, oneSevenTwo, fe80}, []netip.Addr{afe81, a1})
-assert.True(t, ok)
-assert.Equal(t, out, a1)
-out, ok = findNetworkUnion([]netip.Prefix{fe80, tenDot, oneSevenTwo}, []netip.Addr{afe81, a1})
-assert.True(t, ok)
-assert.Equal(t, out, afe81)
-
-//some mismatches
-out, ok = findNetworkUnion([]netip.Prefix{tenDot, oneSevenTwo, fe80}, []netip.Addr{afe81})
-assert.True(t, ok)
-assert.Equal(t, out, afe81)
-out, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fe80}, []netip.Addr{a1, afe81})
-assert.True(t, ok)
-assert.Equal(t, out, afe81)
-
-//falsey cases
-out, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fe80}, []netip.Addr{a1})
-assert.False(t, ok)
-out, ok = findNetworkUnion([]netip.Prefix{fc00, fe80}, []netip.Addr{a1})
-assert.False(t, ok)
-out, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fc00}, []netip.Addr{a1, afe81})
-assert.False(t, ok)
-out, ok = findNetworkUnion([]netip.Prefix{fc00}, []netip.Addr{a1, afe81})
-assert.False(t, ok)
-}
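Test_findNetworkUnion above pins down the lookup order: the networks are scanned in order and the first supplied address contained in one of them wins. Below is a standalone sketch of an implementation consistent with those assertions; the real helper lives in the nebula package, so this version is only illustrative.

```go
package main

import (
	"fmt"
	"net/netip"
)

// findNetworkUnion returns the first address (scanning networks in order)
// that falls inside one of the given prefixes, matching the ordering the
// test above asserts.
func findNetworkUnion(networks []netip.Prefix, addrs []netip.Addr) (netip.Addr, bool) {
	for _, n := range networks {
		for _, a := range addrs {
			if n.Contains(a) {
				return a, true
			}
		}
	}
	return netip.Addr{}, false
}

func main() {
	tenDot := netip.MustParsePrefix("10.0.0.0/8")
	fe80 := netip.MustParsePrefix("fe80::/8")
	a1 := netip.MustParseAddr("10.0.0.1")
	afe81 := netip.MustParseAddr("fe80::1")

	out, ok := findNetworkUnion([]netip.Prefix{fe80, tenDot}, []netip.Addr{afe81, a1})
	fmt.Println(out, ok) // fe80::1 true, because fe80::/8 is checked first
}
```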
main.go (82 lines changed)
@@ -2,6 +2,7 @@ package nebula

 import (
 "context"
+"encoding/binary"
 "fmt"
 "net"
 "net/netip"
@@ -13,10 +14,10 @@ import (
 "github.com/slackhq/nebula/sshd"
 "github.com/slackhq/nebula/udp"
 "github.com/slackhq/nebula/util"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"
 )

-type m = map[string]any
+type m map[string]interface{}

 func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, deviceFactory overlay.DeviceFactory) (retcon *Control, reterr error) {
 ctx, cancel := context.WithCancel(context.Background())
@@ -60,12 +61,25 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 return nil, util.ContextualizeIfNeeded("Failed to load PKI from config", err)
 }

-fw, err := NewFirewallFromConfig(l, pki.getCertState(), c)
+certificate := pki.GetCertState().Certificate
+fw, err := NewFirewallFromConfig(l, certificate, c)
 if err != nil {
 return nil, util.ContextualizeIfNeeded("Error while loading firewall rules", err)
 }
 l.WithField("firewallHashes", fw.GetRuleHashes()).Info("Firewall started")

+ones, _ := certificate.Details.Ips[0].Mask.Size()
+addr, ok := netip.AddrFromSlice(certificate.Details.Ips[0].IP)
+if !ok {
+err = util.NewContextualError(
+"Invalid ip address in certificate",
+m{"vpnIp": certificate.Details.Ips[0].IP},
+nil,
+)
+return nil, err
+}
+tunCidr := netip.PrefixFrom(addr, ones)
+
 ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
 if err != nil {
 return nil, util.ContextualizeIfNeeded("Error while creating SSH server", err)
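The right-hand side derives the tun CIDR from the certificate's net.IPNet style address. A self-contained sketch of that conversion with placeholder values; note that the Unmap call below is specific to this sketch, because net.IPv4 returns a 16-byte slice.

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// Placeholder stand-in for the certificate's first IP/mask pair.
	ipNet := &net.IPNet{
		IP:   net.IPv4(10, 128, 0, 1),
		Mask: net.CIDRMask(24, 32),
	}

	ones, _ := ipNet.Mask.Size()
	addr, ok := netip.AddrFromSlice(ipNet.IP)
	if !ok {
		panic("invalid ip address")
	}
	// net.IPv4 yields a 16-byte slice, so unmap to get a true 4-byte IPv4 addr.
	tunCidr := netip.PrefixFrom(addr.Unmap(), ones)
	fmt.Println(tunCidr) // 10.128.0.1/24
}
```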
@@ -128,7 +142,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 deviceFactory = overlay.NewDeviceFromConfig
 }

-tun, err = deviceFactory(c, l, pki.getCertState().myVpnNetworks, routines)
+tun, err = deviceFactory(c, l, tunCidr, routines)
 if err != nil {
 return nil, util.ContextualizeIfNeeded("Failed to get a tun/tap device", err)
 }
@@ -183,10 +197,10 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 }
 }

-hostMap := NewHostMapFromConfig(l, c)
+hostMap := NewHostMapFromConfig(l, tunCidr, c)
 punchy := NewPunchyFromConfig(l, c)
 connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
-lightHouse, err := NewLightHouseFromConfig(ctx, l, c, pki.getCertState(), udpConns[0], punchy)
+lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
 if err != nil {
 return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
 }
@@ -222,29 +236,40 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 }

 ifConfig := &InterfaceConfig{
 HostMap: hostMap,
 Inside: tun,
 Outside: udpConns[0],
 pki: pki,
+Cipher: c.GetString("cipher", "aes"),
 Firewall: fw,
 ServeDns: serveDns,
 HandshakeManager: handshakeManager,
 connectionManager: connManager,
 lightHouse: lightHouse,
 tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
 reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
 reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
 DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
 DropMulticast: c.GetBool("tun.drop_multicast", false),
 routines: routines,
 MessageMetrics: messageMetrics,
 version: buildVersion,
 relayManager: NewRelayManager(ctx, l, hostMap, c),
 punchy: punchy,

 ConntrackCacheTimeout: conntrackCacheTimeout,
 l: l,
 }

+switch ifConfig.Cipher {
+case "aes":
+noiseEndianness = binary.BigEndian
+case "chachapoly":
+noiseEndianness = binary.LittleEndian
+default:
+return nil, fmt.Errorf("unknown cipher: %v", ifConfig.Cipher)
+}
+
 var ifce *Interface
 if !configTest {
 ifce, err = NewInterface(ctx, ifConfig)
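The right-hand side picks the Noise nonce byte order from the configured cipher. A minimal sketch of that selection in isolation; the aes/chachapoly mapping is taken from the diff, while the helper name and the hard-coded input are illustrative stand-ins for the config lookup.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// noiseEndiannessFor mirrors the switch in the diff above.
func noiseEndiannessFor(cipher string) (binary.ByteOrder, error) {
	switch cipher {
	case "aes":
		return binary.BigEndian, nil
	case "chachapoly":
		return binary.LittleEndian, nil
	default:
		return nil, fmt.Errorf("unknown cipher: %v", cipher)
	}
}

func main() {
	order, err := noiseEndiannessFor("aes") // value would normally come from the "cipher" config key
	if err != nil {
		panic(err)
	}
	fmt.Println(order) // BigEndian
}
```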
@@ -252,6 +277,8 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 return nil, fmt.Errorf("failed to initialize interface: %s", err)
 }

+// TODO: Better way to attach these, probably want a new interface in InterfaceConfig
+// I don't want to make this initial commit too far-reaching though
 ifce.writers = udpConns
 lightHouse.ifce = ifce

@@ -263,6 +290,8 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 go handshakeManager.Run(ctx)
 }

+// TODO - stats third-party modules start uncancellable goroutines. Update those libs to accept
+// a context so that they can exit when the context is Done.
 statsStart, err := startStats(l, c, buildVersion, configTest)
 if err != nil {
 return nil, util.ContextualizeIfNeeded("Failed to start stats emitter", err)
@@ -272,6 +301,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 return nil, nil
 }

+//TODO: check if we _should_ be emitting stats
 go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))

 attachCommands(l, c, ssh, ifce)
@@ -280,7 +310,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 var dnsStart func()
 if lightHouse.amLighthouse && serveDns {
 l.Debugln("Starting dns server")
-dnsStart = dnsMain(l, pki.getCertState(), hostMap, c)
+dnsStart = dnsMain(l, hostMap, c)
 }

 return &Control{

@@ -7,6 +7,8 @@ import (
 "github.com/slackhq/nebula/header"
 )

+//TODO: this can probably move into the header package
+
 type MessageMetrics struct {
 rx [][]metrics.Counter
 tx [][]metrics.Counter
metadata.go (new file, 18 lines)
@@ -0,0 +1,18 @@
+package nebula
+
+/*
+
+import (
+proto "google.golang.org/protobuf/proto"
+)
+
+func HandleMetaProto(p []byte) {
+m := &NebulaMeta{}
+err := proto.Unmarshal(p, m)
+if err != nil {
+l.Debugf("problem unmarshaling meta message: %s", err)
+}
+//fmt.Println(m)
+}
+
+*/
nebula.pb.go (847 lines changed) — file diff suppressed because it is too large
nebula.proto (32 lines changed)
@@ -23,28 +23,19 @@ message NebulaMeta {
 }

 message NebulaMetaDetails {
-uint32 OldVpnAddr = 1 [deprecated = true];
-Addr VpnAddr = 6;
-repeated uint32 OldRelayVpnAddrs = 5 [deprecated = true];
-repeated Addr RelayVpnAddrs = 7;
-
-repeated V4AddrPort V4AddrPorts = 2;
-repeated V6AddrPort V6AddrPorts = 4;
+uint32 VpnIp = 1;
+repeated Ip4AndPort Ip4AndPorts = 2;
+repeated Ip6AndPort Ip6AndPorts = 4;
+repeated uint32 RelayVpnIp = 5;
 uint32 counter = 3;
 }

-message Addr {
-uint64 Hi = 1;
-uint64 Lo = 2;
-}
-
-message V4AddrPort {
-uint32 Addr = 1;
+message Ip4AndPort {
+uint32 Ip = 1;
 uint32 Port = 2;
 }

-message V6AddrPort {
+message Ip6AndPort {
 uint64 Hi = 1;
 uint64 Lo = 2;
 uint32 Port = 3;
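On the left-hand side, a full 128-bit address travels as an Addr message with two uint64 halves (Hi/Lo). Here is a standalone sketch of that packing and unpacking with the standard library; whether IPv4 addresses are also carried this way is not shown in this hunk, so the example sticks to IPv6.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// pack splits a 128-bit address into the Hi/Lo pair used by the Addr message.
func pack(a netip.Addr) (hi, lo uint64) {
	b := a.As16()
	return binary.BigEndian.Uint64(b[0:8]), binary.BigEndian.Uint64(b[8:16])
}

// unpack rebuilds the netip.Addr from the two halves.
func unpack(hi, lo uint64) netip.Addr {
	var b [16]byte
	binary.BigEndian.PutUint64(b[0:8], hi)
	binary.BigEndian.PutUint64(b[8:16], lo)
	return netip.AddrFrom16(b)
}

func main() {
	a := netip.MustParseAddr("fd00::1")
	hi, lo := pack(a)
	fmt.Printf("hi=%#x lo=%#x round-trip=%s\n", hi, lo, unpack(hi, lo))
}
```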
@@ -71,7 +62,6 @@ message NebulaHandshakeDetails {
 uint32 ResponderIndex = 3;
 uint64 Cookie = 4;
 uint64 Time = 5;
-uint32 CertVersion = 8;
 // reserved for WIP multiport
 reserved 6, 7;
 }
@@ -86,10 +76,6 @@ message NebulaControl {

 uint32 InitiatorRelayIndex = 2;
 uint32 ResponderRelayIndex = 3;
-uint32 OldRelayToAddr = 4 [deprecated = true];
-uint32 OldRelayFromAddr = 5 [deprecated = true];
-
-Addr RelayToAddr = 6;
-Addr RelayFromAddr = 7;
+uint32 RelayToIp = 4;
+uint32 RelayFromIp = 5;
 }
@@ -1,50 +0,0 @@
-package noiseutil
-
-import (
-"crypto/ecdh"
-"fmt"
-"strings"
-
-"github.com/slackhq/nebula/pkclient"
-
-"github.com/flynn/noise"
-)
-
-// DHP256PKCS11 is the NIST P-256 ECDH function
-var DHP256PKCS11 noise.DHFunc = newNISTP11Curve("P256", ecdh.P256(), 32)
-
-type nistP11Curve struct {
-nistCurve
-}
-
-func newNISTP11Curve(name string, curve ecdh.Curve, byteLen int) nistP11Curve {
-return nistP11Curve{
-newNISTCurve(name, curve, byteLen),
-}
-}
-
-func (c nistP11Curve) DH(privkey, pubkey []byte) ([]byte, error) {
-//for this function "privkey" is actually a pkcs11 URI
-pkStr := string(privkey)
-
-//to set up a handshake, we need to also do non-pkcs11-DH. Handle that here.
-if !strings.HasPrefix(pkStr, "pkcs11:") {
-return DHP256.DH(privkey, pubkey)
-}
-ecdhPubKey, err := c.curve.NewPublicKey(pubkey)
-if err != nil {
-return nil, fmt.Errorf("unable to unmarshal pubkey: %w", err)
-}
-
-//this is not the most performant way to do this (a long-lived client would be better)
-//but, it works, and helps avoid problems with stale sessions and HSMs used by multiple users.
-client, err := pkclient.FromUrl(pkStr)
-if err != nil {
-return nil, err
-}
-defer func(client *pkclient.PKClient) {
-_ = client.Close()
-}(client)
-
-return client.DeriveNoise(ecdhPubKey.Bytes())
-}
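The deleted noiseutil file falls back to plain software ECDH whenever the private key is not a pkcs11: URI. For reference, a minimal sketch of a software-only P-256 ECDH exchange with the standard library; keys are generated on the spot and the example is purely illustrative of that fallback path.

```go
package main

import (
	"bytes"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := ecdh.P256()

	alice, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	bob, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Each side derives the same shared secret from its private key and the
	// peer's public key; this is the non-PKCS#11 path the deleted code
	// delegated to.
	s1, err := alice.ECDH(bob.PublicKey())
	if err != nil {
		panic(err)
	}
	s2, err := bob.ECDH(alice.PublicKey())
	if err != nil {
		panic(err)
	}
	fmt.Println("secrets match:", bytes.Equal(s1, s2), "len:", len(s1)) // true 32
}
```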
outside.go (287 lines changed)
@@ -3,25 +3,46 @@ package nebula
 import (
 "encoding/binary"
 "errors"
+"fmt"
 "net/netip"
 "time"

-"github.com/google/gopacket/layers"
-"golang.org/x/net/ipv6"
+"github.com/flynn/noise"

 "github.com/sirupsen/logrus"
+"github.com/slackhq/nebula/cert"
 "github.com/slackhq/nebula/firewall"
 "github.com/slackhq/nebula/header"
+"github.com/slackhq/nebula/udp"
 "golang.org/x/net/ipv4"
+"google.golang.org/protobuf/proto"
 )

 const (
 minFwPacketLen = 4
 )

-func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf *LightHouseHandler, nb []byte, q int, localCache firewall.ConntrackCache) {
+// TODO: IPV6-WORK this can likely be removed now
+func readOutsidePackets(f *Interface) udp.EncReader {
+return func(
+addr netip.AddrPort,
+out []byte,
+packet []byte,
+header *header.H,
+fwPacket *firewall.Packet,
+lhh udp.LightHouseHandlerFunc,
+nb []byte,
+q int,
+localCache firewall.ConntrackCache,
+) {
+f.readOutsidePackets(addr, nil, out, packet, header, fwPacket, lhh, nb, q, localCache)
+}
+}
+
+func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
 err := h.Parse(packet)
 if err != nil {
+// TODO: best if we return this and let caller log
+// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
 // Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
 if len(packet) > 1 {
 f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", ip, err)
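The right-hand side of this hunk turns the readOutsidePackets method into a standalone callback by returning a closure that fixes the receiver and pins the via argument to nil. A hedged sketch of that adapter pattern in isolation; the processor/reader names are invented for the example and are not nebula types.

```go
package main

import "fmt"

// processor stands in for *Interface; handle stands in for readOutsidePackets.
type processor struct{ name string }

func (p *processor) handle(from string, payload []byte, via any) {
	fmt.Printf("%s: %d bytes from %s (via=%v)\n", p.name, len(payload), from, via)
}

// reader is the shape the transport layer wants: no receiver, no via argument.
type reader func(from string, payload []byte)

// newReader adapts the method into that shape, pinning via to nil, which is
// the same trick readOutsidePackets(f) plays on the right-hand side above.
func newReader(p *processor) reader {
	return func(from string, payload []byte) {
		p.handle(from, payload, nil)
	}
}

func main() {
	r := newReader(&processor{name: "outside"})
	r("192.0.2.1:4242", []byte{0x01, 0x02, 0x03})
}
```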
@@ -31,7 +52,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []

 //l.Error("in packet ", header, packet[HeaderLen:])
 if ip.IsValid() {
-if f.myVpnNetworksTable.Contains(ip.Addr()) {
+if f.myVpnNet.Contains(ip.Addr()) {
 if f.l.Level >= logrus.DebugLevel {
 f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet")
 }
@@ -88,7 +109,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 if !ok {
 // The only way this happens is if hostmap has an index to the correct HostInfo, but the HostInfo is missing
 // its internal mapping. This should never happen.
-hostinfo.logger(f.l).WithFields(logrus.Fields{"vpnAddrs": hostinfo.vpnAddrs, "remoteIndex": h.RemoteIndex}).Error("HostInfo missing remote relay index")
+hostinfo.logger(f.l).WithFields(logrus.Fields{"vpnIp": hostinfo.vpnIp, "remoteIndex": h.RemoteIndex}).Error("HostInfo missing remote relay index")
 return
 }

@@ -100,9 +121,9 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 return
 case ForwardingType:
 // Find the target HostInfo relay object
-targetHI, targetRelay, err := f.hostMap.QueryVpnAddrsRelayFor(hostinfo.vpnAddrs, relay.PeerAddr)
+targetHI, targetRelay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relay.PeerIp)
 if err != nil {
-hostinfo.logger(f.l).WithField("relayTo", relay.PeerAddr).WithError(err).WithField("hostinfo.vpnAddrs", hostinfo.vpnAddrs).Info("Failed to find target host info by ip")
+hostinfo.logger(f.l).WithField("relayTo", relay.PeerIp).WithError(err).Info("Failed to find target host info by ip")
 return
 }

@@ -118,7 +139,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal")
 }
 } else {
-hostinfo.logger(f.l).WithFields(logrus.Fields{"relayTo": relay.PeerAddr, "relayFrom": hostinfo.vpnAddrs[0], "targetRelayState": targetRelay.State}).Info("Unexpected target relay state")
+hostinfo.logger(f.l).WithFields(logrus.Fields{"relayTo": relay.PeerIp, "relayFrom": hostinfo.vpnIp, "targetRelayState": targetRelay.State}).Info("Unexpected target relay state")
 return
 }
 }
@@ -135,10 +156,13 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
 WithField("packet", packet).
 Error("Failed to decrypt lighthouse packet")

+//TODO: maybe after build 64 is out? 06/14/2018 - NB
+//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
 return
 }

-lhf.HandleRequest(ip, hostinfo.vpnAddrs, d, f)
+lhf(ip, hostinfo.vpnIp, d)

 // Fallthrough to the bottom to record incoming traffic

@@ -153,6 +177,9 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
 WithField("packet", packet).
 Error("Failed to decrypt test packet")

+//TODO: maybe after build 64 is out? 06/14/2018 - NB
+//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
 return
 }

@@ -202,8 +229,14 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 Error("Failed to decrypt Control packet")
 return
 }
+m := &NebulaControl{}
+err = m.Unmarshal(d)
+if err != nil {
+hostinfo.logger(f.l).WithError(err).Error("Failed to unmarshal control message")
+break
+}

-f.relayManager.HandleControlMsg(hostinfo, d, f)
+f.relayManager.HandleControlMsg(hostinfo, m, f)

 default:
 f.messageMetrics.Rx(h.Type, h.Subtype, 1)
@@ -220,8 +253,8 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 func (f *Interface) closeTunnel(hostInfo *HostInfo) {
 final := f.hostMap.DeleteHostInfo(hostInfo)
 if final {
-// We no longer have any tunnels with this vpn addr, clear learned lighthouse state to lower memory usage
-f.lightHouse.DeleteVpnAddrs(hostInfo.vpnAddrs)
+// We no longer have any tunnels with this vpn ip, clear learned lighthouse state to lower memory usage
+f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
 }
 }

@@ -230,26 +263,25 @@ func (f *Interface) sendCloseTunnel(h *HostInfo) {
f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
}

-func (f *Interface) handleHostRoaming(hostinfo *HostInfo, udpAddr netip.AddrPort) {
+func (f *Interface) handleHostRoaming(hostinfo *HostInfo, ip netip.AddrPort) {
-if udpAddr.IsValid() && hostinfo.remote != udpAddr {
+if ip.IsValid() && hostinfo.remote != ip {
-if !f.lightHouse.GetRemoteAllowList().AllowAll(hostinfo.vpnAddrs, udpAddr.Addr()) {
+if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, ip.Addr()) {
-hostinfo.logger(f.l).WithField("newAddr", udpAddr).Debug("lighthouse.remote_allow_list denied roaming")
+hostinfo.logger(f.l).WithField("newAddr", ip).Debug("lighthouse.remote_allow_list denied roaming")
return
}
-if !hostinfo.lastRoam.IsZero() && udpAddr == hostinfo.lastRoamRemote && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
+if !hostinfo.lastRoam.IsZero() && ip == hostinfo.lastRoamRemote && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
if f.l.Level >= logrus.DebugLevel {
-hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", udpAddr).
+hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", ip).
Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds)
}
return
}

-hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", udpAddr).
+hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", ip).
Info("Host roamed to new udp ip/port.")
hostinfo.lastRoam = time.Now()
hostinfo.lastRoamRemote = hostinfo.remote
-hostinfo.SetRemote(udpAddr)
+hostinfo.SetRemote(ip)
}

}
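The handleHostRoaming hunk above rate-limits flapping: a roam straight back to the remote we just left is ignored while the suppression window is still open. Below is a minimal, self-contained sketch of that rule, not the diffed code itself; the RoamingSuppressSeconds value of 2 and the helper's name are assumptions made only so the example compiles and runs on its own.

package main

import (
	"fmt"
	"net/netip"
	"time"
)

// Assumed value for this sketch only; nebula defines its own RoamingSuppressSeconds.
const RoamingSuppressSeconds = 2

// shouldSuppressRoam reports whether a roam back to newRemote should be ignored
// because we only just moved away from it.
func shouldSuppressRoam(lastRoam time.Time, lastRoamRemote, newRemote netip.AddrPort) bool {
	return !lastRoam.IsZero() &&
		newRemote == lastRoamRemote &&
		time.Since(lastRoam) < RoamingSuppressSeconds*time.Second
}

func main() {
	prev := netip.MustParseAddrPort("192.0.2.1:4242")
	// We roamed away from prev one second ago, so a move straight back is suppressed.
	fmt.Println(shouldSuppressRoam(time.Now().Add(-time.Second), prev, prev)) // true
}
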
@@ -269,140 +301,24 @@ func (f *Interface) handleEncrypted(ci *ConnectionState, addr netip.AddrPort, h
return true
}

-var (
-ErrPacketTooShort = errors.New("packet is too short")
-ErrUnknownIPVersion = errors.New("packet is an unknown ip version")
-ErrIPv4InvalidHeaderLength = errors.New("invalid ipv4 header length")
-ErrIPv4PacketTooShort = errors.New("ipv4 packet is too short")
-ErrIPv6PacketTooShort = errors.New("ipv6 packet is too short")
-ErrIPv6CouldNotFindPayload = errors.New("could not find payload in ipv6 packet")
-)

// newPacket validates and parses the interesting bits for the firewall out of the ip and sub protocol headers
func newPacket(data []byte, incoming bool, fp *firewall.Packet) error {
-if len(data) < 1 {
-return ErrPacketTooShort
-}

-version := int((data[0] >> 4) & 0x0f)
-switch version {
-case ipv4.Version:
-return parseV4(data, incoming, fp)
-case ipv6.Version:
-return parseV6(data, incoming, fp)
-}
-return ErrUnknownIPVersion
-}

-func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
-dataLen := len(data)
-if dataLen < ipv6.HeaderLen {
-return ErrIPv6PacketTooShort
-}

-if incoming {
-fp.RemoteAddr, _ = netip.AddrFromSlice(data[8:24])
-fp.LocalAddr, _ = netip.AddrFromSlice(data[24:40])
-} else {
-fp.LocalAddr, _ = netip.AddrFromSlice(data[8:24])
-fp.RemoteAddr, _ = netip.AddrFromSlice(data[24:40])
-}

-protoAt := 6 // NextHeader is at 6 bytes into the ipv6 header
-offset := ipv6.HeaderLen // Start at the end of the ipv6 header
-next := 0
-for {
-if protoAt >= dataLen {
-break
-}
-proto := layers.IPProtocol(data[protoAt])

-switch proto {
-case layers.IPProtocolICMPv6, layers.IPProtocolESP, layers.IPProtocolNoNextHeader:
-fp.Protocol = uint8(proto)
-fp.RemotePort = 0
-fp.LocalPort = 0
-fp.Fragment = false
-return nil

-case layers.IPProtocolTCP, layers.IPProtocolUDP:
-if dataLen < offset+4 {
-return ErrIPv6PacketTooShort
-}

-fp.Protocol = uint8(proto)
-if incoming {
-fp.RemotePort = binary.BigEndian.Uint16(data[offset : offset+2])
-fp.LocalPort = binary.BigEndian.Uint16(data[offset+2 : offset+4])
-} else {
-fp.LocalPort = binary.BigEndian.Uint16(data[offset : offset+2])
-fp.RemotePort = binary.BigEndian.Uint16(data[offset+2 : offset+4])
-}

-fp.Fragment = false
-return nil

-case layers.IPProtocolIPv6Fragment:
-// Fragment header is 8 bytes, need at least offset+4 to read the offset field
-if dataLen < offset+8 {
-return ErrIPv6PacketTooShort
-}

-// Check if this is the first fragment
-fragmentOffset := binary.BigEndian.Uint16(data[offset+2:offset+4]) &^ uint16(0x7) // Remove the reserved and M flag bits
-if fragmentOffset != 0 {
-// Non-first fragment, use what we have now and stop processing
-fp.Protocol = data[offset]
-fp.Fragment = true
-fp.RemotePort = 0
-fp.LocalPort = 0
-return nil
-}

-// The next loop should be the transport layer since we are the first fragment
-next = 8 // Fragment headers are always 8 bytes

-case layers.IPProtocolAH:
-// Auth headers, used by IPSec, have a different meaning for header length
-if dataLen <= offset+1 {
-break
-}

-next = int(data[offset+1]+2) << 2

-default:
-// Normal ipv6 header length processing
-if dataLen <= offset+1 {
-break
-}

-next = int(data[offset+1]+1) << 3
-}

-if next <= 0 {
-// Safety check, each ipv6 header has to be at least 8 bytes
-next = 8
-}

-protoAt = offset
-offset = offset + next
-}

-return ErrIPv6CouldNotFindPayload
-}

-func parseV4(data []byte, incoming bool, fp *firewall.Packet) error {
// Do we at least have an ipv4 header worth of data?
if len(data) < ipv4.HeaderLen {
-return ErrIPv4PacketTooShort
+return fmt.Errorf("packet is less than %v bytes", ipv4.HeaderLen)
+}

+// Is it an ipv4 packet?
+if int((data[0]>>4)&0x0f) != 4 {
+return fmt.Errorf("packet is not ipv4, type: %v", int((data[0]>>4)&0x0f))
}

// Adjust our start position based on the advertised ip header length
ihl := int(data[0]&0x0f) << 2

-// Well-formed ip header length?
+// Well formed ip header length?
if ihl < ipv4.HeaderLen {
-return ErrIPv4InvalidHeaderLength
+return fmt.Errorf("packet had an invalid header length: %v", ihl)
}

// Check if this is the second or further fragment of a fragmented packet.
@@ -418,13 +334,14 @@ func parseV4(data []byte, incoming bool, fp *firewall.Packet) error {
minLen += minFwPacketLen
}
if len(data) < minLen {
-return ErrIPv4InvalidHeaderLength
+return fmt.Errorf("packet is less than %v bytes, ip header len: %v", minLen, ihl)
}

// Firewall packets are locally oriented
if incoming {
-fp.RemoteAddr, _ = netip.AddrFromSlice(data[12:16])
+//TODO: IPV6-WORK
-fp.LocalAddr, _ = netip.AddrFromSlice(data[16:20])
+fp.RemoteIP, _ = netip.AddrFromSlice(data[12:16])
+fp.LocalIP, _ = netip.AddrFromSlice(data[16:20])
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
fp.RemotePort = 0
fp.LocalPort = 0
@@ -433,8 +350,9 @@ func parseV4(data []byte, incoming bool, fp *firewall.Packet) error {
fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
}
} else {
-fp.LocalAddr, _ = netip.AddrFromSlice(data[12:16])
+//TODO: IPV6-WORK
-fp.RemoteAddr, _ = netip.AddrFromSlice(data[16:20])
+fp.LocalIP, _ = netip.AddrFromSlice(data[12:16])
+fp.RemoteIP, _ = netip.AddrFromSlice(data[16:20])
if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
fp.RemotePort = 0
fp.LocalPort = 0
@@ -469,6 +387,8 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
if err != nil {
hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
+//TODO: maybe after build 64 is out? 06/14/2018 - NB
+//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
return false
}

@@ -515,8 +435,9 @@ func (f *Interface) maybeSendRecvError(endpoint netip.AddrPort, index uint32) {
func (f *Interface) sendRecvError(endpoint netip.AddrPort, index uint32) {
f.messageMetrics.Tx(header.RecvError, 0, 1)

+//TODO: this should be a signed message so we can trust that we should drop the index
b := header.Encode(make([]byte, header.Len), header.Version, header.RecvError, 0, index, 0)
-_ = f.outside.WriteTo(b, endpoint)
+f.outside.WriteTo(b, endpoint)
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("index", index).
WithField("udpAddr", endpoint).
@@ -550,3 +471,65 @@ func (f *Interface) handleRecvError(addr netip.AddrPort, h *header.H) {
// We also delete it from pending hostmap to allow for fast reconnect.
f.handshakeManager.DeleteHostInfo(hostinfo)
}

+/*
+func (f *Interface) sendMeta(ci *ConnectionState, endpoint *net.UDPAddr, meta *NebulaMeta) {
+if ci.eKey != nil {
+//TODO: log error?
+return
+}

+msg, err := proto.Marshal(meta)
+if err != nil {
+l.Debugln("failed to encode header")
+}

+c := ci.messageCounter
+b := HeaderEncode(nil, Version, uint8(metadata), 0, hostinfo.remoteIndexId, c)
+ci.messageCounter++

+msg := ci.eKey.EncryptDanger(b, nil, msg, c)
+//msg := ci.eKey.EncryptDanger(b, nil, []byte(fmt.Sprintf("%d", counter)), c)
+f.outside.WriteTo(msg, endpoint)
+}
+*/

+func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte, caPool *cert.NebulaCAPool) (*cert.NebulaCertificate, error) {
+pk := h.PeerStatic()

+if pk == nil {
+return nil, errors.New("no peer static key was present")
+}

+if rawCertBytes == nil {
+return nil, errors.New("provided payload was empty")
+}

+r := &cert.RawNebulaCertificate{}
+err := proto.Unmarshal(rawCertBytes, r)
+if err != nil {
+return nil, fmt.Errorf("error unmarshaling cert: %s", err)
+}

+// If the Details are nil, just exit to avoid crashing
+if r.Details == nil {
+return nil, fmt.Errorf("certificate did not contain any details")
+}

+r.Details.PublicKey = pk
+recombined, err := proto.Marshal(r)
+if err != nil {
+return nil, fmt.Errorf("error while recombining certificate: %s", err)
+}

+c, _ := cert.UnmarshalNebulaCertificate(recombined)
+isValid, err := c.Verify(time.Now(), caPool)
+if err != nil {
+return c, fmt.Errorf("certificate validation failed: %s", err)
+} else if !isValid {
+// This case should never happen but here's to defensive programming!
+return c, errors.New("certificate validation failed but did not return an error")
+}

+return c, nil
+}
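Worth noting about the outside.go changes as a whole: on the newer side of this diff, newPacket, parseV4 and parseV6 return package-level sentinel errors (ErrPacketTooShort, ErrUnknownIPVersion, and so on) instead of freshly formatted fmt.Errorf strings, which is what lets the tests below match with require.ErrorIs rather than comparing error text. The following is only a hedged sketch of how a caller can branch on such sentinels; the stand-in classify function and the locally declared error variables are invented for illustration and merely mirror the names added in the hunk above.

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins that mirror the sentinels added in the diff; declared here
// only so this sketch compiles on its own.
var (
	ErrPacketTooShort   = errors.New("packet is too short")
	ErrUnknownIPVersion = errors.New("packet is an unknown ip version")
)

// classify is a stand-in for newPacket's version switch: it inspects only the
// high nibble of the first byte, as the real parser does.
func classify(data []byte) error {
	if len(data) < 1 {
		return ErrPacketTooShort
	}
	switch data[0] >> 4 {
	case 4, 6:
		return nil // the real code hands off to parseV4 / parseV6 here
	default:
		return ErrUnknownIPVersion
	}
}

func main() {
	for _, pkt := range [][]byte{{}, {0x40}, {0xf0}} {
		switch err := classify(pkt); {
		case errors.Is(err, ErrPacketTooShort):
			fmt.Println("drop: too short")
		case errors.Is(err, ErrUnknownIPVersion):
			fmt.Println("drop: unknown IP version")
		default:
			fmt.Println("parse ok (so far)")
		}
	}
}
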
590
outside_test.go
@@ -1,33 +1,21 @@
package nebula

import (
-"bytes"
-"encoding/binary"
"net"
"net/netip"
"testing"

-"github.com/google/gopacket"
-"github.com/google/gopacket/layers"

"github.com/slackhq/nebula/firewall"
"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
"golang.org/x/net/ipv4"
)

func Test_newPacket(t *testing.T) {
p := &firewall.Packet{}

-// length fails
+// length fail
-err := newPacket([]byte{}, true, p)
+err := newPacket([]byte{0, 1}, true, p)
-require.ErrorIs(t, err, ErrPacketTooShort)
+assert.EqualError(t, err, "packet is less than 20 bytes")

-err = newPacket([]byte{0x40}, true, p)
-require.ErrorIs(t, err, ErrIPv4PacketTooShort)

-err = newPacket([]byte{0x60}, true, p)
-require.ErrorIs(t, err, ErrIPv6PacketTooShort)

// length fail with ip options
h := ipv4.Header{
@@ -40,15 +28,16 @@ func Test_newPacket(t *testing.T) {

b, _ := h.Marshal()
err = newPacket(b, true, p)
-require.ErrorIs(t, err, ErrIPv4InvalidHeaderLength)
+assert.EqualError(t, err, "packet is less than 28 bytes, ip header len: 24")

// not an ipv4 packet
err = newPacket([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true, p)
-require.ErrorIs(t, err, ErrUnknownIPVersion)
+assert.EqualError(t, err, "packet is not ipv4, type: 0")

// invalid ihl
err = newPacket([]byte{4<<4 | (8 >> 2 & 0x0f), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true, p)
-require.ErrorIs(t, err, ErrIPv4InvalidHeaderLength)
+assert.EqualError(t, err, "packet had an invalid header length: 8")

// account for variable ip header length - incoming
h = ipv4.Header{
@@ -64,13 +53,12 @@ func Test_newPacket(t *testing.T) {
b = append(b, []byte{0, 3, 0, 4}...)
err = newPacket(b, true, p)

-require.NoError(t, err)
+assert.Nil(t, err)
-assert.Equal(t, uint8(firewall.ProtoTCP), p.Protocol)
+assert.Equal(t, p.Protocol, uint8(firewall.ProtoTCP))
-assert.Equal(t, netip.MustParseAddr("10.0.0.2"), p.LocalAddr)
+assert.Equal(t, p.LocalIP, netip.MustParseAddr("10.0.0.2"))
-assert.Equal(t, netip.MustParseAddr("10.0.0.1"), p.RemoteAddr)
+assert.Equal(t, p.RemoteIP, netip.MustParseAddr("10.0.0.1"))
-assert.Equal(t, uint16(3), p.RemotePort)
+assert.Equal(t, p.RemotePort, uint16(3))
-assert.Equal(t, uint16(4), p.LocalPort)
+assert.Equal(t, p.LocalPort, uint16(4))
-assert.False(t, p.Fragment)

// account for variable ip header length - outgoing
h = ipv4.Header{
@@ -86,550 +74,10 @@ func Test_newPacket(t *testing.T) {
b = append(b, []byte{0, 5, 0, 6}...)
err = newPacket(b, false, p)

-require.NoError(t, err)
+assert.Nil(t, err)
-assert.Equal(t, uint8(2), p.Protocol)
+assert.Equal(t, p.Protocol, uint8(2))
-assert.Equal(t, netip.MustParseAddr("10.0.0.1"), p.LocalAddr)
+assert.Equal(t, p.LocalIP, netip.MustParseAddr("10.0.0.1"))
-assert.Equal(t, netip.MustParseAddr("10.0.0.2"), p.RemoteAddr)
+assert.Equal(t, p.RemoteIP, netip.MustParseAddr("10.0.0.2"))
-assert.Equal(t, uint16(6), p.RemotePort)
+assert.Equal(t, p.RemotePort, uint16(6))
-assert.Equal(t, uint16(5), p.LocalPort)
+assert.Equal(t, p.LocalPort, uint16(5))
-assert.False(t, p.Fragment)
-}

-func Test_newPacket_v6(t *testing.T) {
-p := &firewall.Packet{}

-// invalid ipv6
-ip := layers.IPv6{
-Version: 6,
-HopLimit: 128,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-buffer := gopacket.NewSerializeBuffer()
-opt := gopacket.SerializeOptions{
-ComputeChecksums: false,
-FixLengths: false,
-}
-err := gopacket.SerializeLayers(buffer, opt, &ip)
-require.NoError(t, err)

-err = newPacket(buffer.Bytes(), true, p)
-require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)

-// A v6 packet with a hop-by-hop extension
-// ICMPv6 Payload (Echo Request)
-icmpLayer := layers.ICMPv6{
-TypeCode: layers.ICMPv6TypeEchoRequest,
-}
-// Hop-by-Hop Extension Header
-hopOption := layers.IPv6HopByHopOption{}
-hopOption.OptionData = []byte{0, 0, 0, 0}
-hopByHop := layers.IPv6HopByHop{}
-hopByHop.Options = append(hopByHop.Options, &hopOption)

-ip = layers.IPv6{
-Version: 6,
-HopLimit: 128,
-NextHeader: layers.IPProtocolIPv6Destination,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-buffer.Clear()
-err = gopacket.SerializeLayers(buffer, gopacket.SerializeOptions{
-ComputeChecksums: false,
-FixLengths: true,
-}, &ip, &hopByHop, &icmpLayer)
-if err != nil {
-panic(err)
-}
-// Ensure buffer length checks during parsing with the next 2 tests.

-// A full IPv6 header and 1 byte in the first extension, but missing
-// the length byte.
-err = newPacket(buffer.Bytes()[:41], true, p)
-require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)

-// A full IPv6 header plus 1 full extension, but only 1 byte of the
-// next layer, missing length byte
-err = newPacket(buffer.Bytes()[:49], true, p)
-require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)

-// A good ICMP packet
-ip = layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolICMPv6,
-HopLimit: 128,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-icmp := layers.ICMPv6{}

-buffer.Clear()
-err = gopacket.SerializeLayers(buffer, opt, &ip, &icmp)
-if err != nil {
-panic(err)
-}

-err = newPacket(buffer.Bytes(), true, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(layers.IPProtocolICMPv6), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint16(0), p.RemotePort)
-assert.Equal(t, uint16(0), p.LocalPort)
-assert.False(t, p.Fragment)

-// A good ESP packet
-b := buffer.Bytes()
-b[6] = byte(layers.IPProtocolESP)
-err = newPacket(b, true, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(layers.IPProtocolESP), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint16(0), p.RemotePort)
-assert.Equal(t, uint16(0), p.LocalPort)
-assert.False(t, p.Fragment)

-// A good None packet
-b = buffer.Bytes()
-b[6] = byte(layers.IPProtocolNoNextHeader)
-err = newPacket(b, true, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(layers.IPProtocolNoNextHeader), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint16(0), p.RemotePort)
-assert.Equal(t, uint16(0), p.LocalPort)
-assert.False(t, p.Fragment)

-// An unknown protocol packet
-b = buffer.Bytes()
-b[6] = 255 // 255 is a reserved protocol number
-err = newPacket(b, true, p)
-require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)

-// A good UDP packet
-ip = layers.IPv6{
-Version: 6,
-NextHeader: firewall.ProtoUDP,
-HopLimit: 128,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-udp := layers.UDP{
-SrcPort: layers.UDPPort(36123),
-DstPort: layers.UDPPort(22),
-}
-err = udp.SetNetworkLayerForChecksum(&ip)
-require.NoError(t, err)

-buffer.Clear()
-err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload([]byte{0xde, 0xad, 0xbe, 0xef}))
-if err != nil {
-panic(err)
-}
-b = buffer.Bytes()

-// incoming
-err = newPacket(b, true, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(firewall.ProtoUDP), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint16(36123), p.RemotePort)
-assert.Equal(t, uint16(22), p.LocalPort)
-assert.False(t, p.Fragment)

-// outgoing
-err = newPacket(b, false, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(firewall.ProtoUDP), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.LocalAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.RemoteAddr)
-assert.Equal(t, uint16(36123), p.LocalPort)
-assert.Equal(t, uint16(22), p.RemotePort)
-assert.False(t, p.Fragment)

-// Too short UDP packet
-err = newPacket(b[:len(b)-10], false, p) // pull off the last 10 bytes
-require.ErrorIs(t, err, ErrIPv6PacketTooShort)

-// A good TCP packet
-b[6] = byte(layers.IPProtocolTCP)

-// incoming
-err = newPacket(b, true, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(firewall.ProtoTCP), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint16(36123), p.RemotePort)
-assert.Equal(t, uint16(22), p.LocalPort)
-assert.False(t, p.Fragment)

-// outgoing
-err = newPacket(b, false, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(firewall.ProtoTCP), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.LocalAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.RemoteAddr)
-assert.Equal(t, uint16(36123), p.LocalPort)
-assert.Equal(t, uint16(22), p.RemotePort)
-assert.False(t, p.Fragment)

-// Too short TCP packet
-err = newPacket(b[:len(b)-10], false, p) // pull off the last 10 bytes
-require.ErrorIs(t, err, ErrIPv6PacketTooShort)

-// A good UDP packet with an AH header
-ip = layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolAH,
-HopLimit: 128,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-ah := layers.IPSecAH{
-AuthenticationData: []byte{0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef},
-}
-ah.NextHeader = layers.IPProtocolUDP

-udpHeader := []byte{
-0x8d, 0x1b, // Source port 36123
-0x00, 0x16, // Destination port 22
-0x00, 0x00, // Length
-0x00, 0x00, // Checksum
-}

-buffer.Clear()
-err = ip.SerializeTo(buffer, opt)
-if err != nil {
-panic(err)
-}

-b = buffer.Bytes()
-ahb := serializeAH(&ah)
-b = append(b, ahb...)
-b = append(b, udpHeader...)

-err = newPacket(b, true, p)
-require.NoError(t, err)
-assert.Equal(t, uint8(firewall.ProtoUDP), p.Protocol)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint16(36123), p.RemotePort)
-assert.Equal(t, uint16(22), p.LocalPort)
-assert.False(t, p.Fragment)

-// Ensure buffer bounds checking during processing
-err = newPacket(b[:41], true, p)
-require.ErrorIs(t, err, ErrIPv6PacketTooShort)

-// Invalid AH header
-b = buffer.Bytes()
-err = newPacket(b, true, p)
-require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
-}

-func Test_newPacket_ipv6Fragment(t *testing.T) {
-p := &firewall.Packet{}

-ip := &layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolIPv6Fragment,
-HopLimit: 64,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-// First fragment
-fragHeader1 := []byte{
-uint8(layers.IPProtocolUDP), // Next Header (UDP)
-0x00, // Reserved
-0x00, // Fragment Offset high byte (0)
-0x01, // Fragment Offset low byte & flags (M=1)
-0x00, 0x00, 0x00, 0x01, // Identification
-}

-udpHeader := []byte{
-0x8d, 0x1b, // Source port 36123
-0x00, 0x16, // Destination port 22
-0x00, 0x00, // Length
-0x00, 0x00, // Checksum
-}

-buffer := gopacket.NewSerializeBuffer()
-opts := gopacket.SerializeOptions{
-ComputeChecksums: true,
-FixLengths: true,
-}

-err := ip.SerializeTo(buffer, opts)
-if err != nil {
-t.Fatal(err)
-}

-firstFrag := buffer.Bytes()
-firstFrag = append(firstFrag, fragHeader1...)
-firstFrag = append(firstFrag, udpHeader...)
-firstFrag = append(firstFrag, []byte{0xde, 0xad, 0xbe, 0xef}...)

-// Test first fragment incoming
-err = newPacket(firstFrag, true, p)
-require.NoError(t, err)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint8(layers.IPProtocolUDP), p.Protocol)
-assert.Equal(t, uint16(36123), p.RemotePort)
-assert.Equal(t, uint16(22), p.LocalPort)
-assert.False(t, p.Fragment)

-// Test first fragment outgoing
-err = newPacket(firstFrag, false, p)
-require.NoError(t, err)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.LocalAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.RemoteAddr)
-assert.Equal(t, uint8(layers.IPProtocolUDP), p.Protocol)
-assert.Equal(t, uint16(36123), p.LocalPort)
-assert.Equal(t, uint16(22), p.RemotePort)
-assert.False(t, p.Fragment)

-// Second fragment
-fragHeader2 := []byte{
-uint8(layers.IPProtocolUDP), // Next Header (UDP)
-0x00, // Reserved
-0xb9, // Fragment Offset high byte (185)
-0x01, // Fragment Offset low byte & flags (M=1)
-0x00, 0x00, 0x00, 0x01, // Identification
-}

-buffer.Clear()
-err = ip.SerializeTo(buffer, opts)
-if err != nil {
-t.Fatal(err)
-}

-secondFrag := buffer.Bytes()
-secondFrag = append(secondFrag, fragHeader2...)
-secondFrag = append(secondFrag, []byte{0xde, 0xad, 0xbe, 0xef}...)

-// Test second fragment incoming
-err = newPacket(secondFrag, true, p)
-require.NoError(t, err)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.RemoteAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.LocalAddr)
-assert.Equal(t, uint8(layers.IPProtocolUDP), p.Protocol)
-assert.Equal(t, uint16(0), p.RemotePort)
-assert.Equal(t, uint16(0), p.LocalPort)
-assert.True(t, p.Fragment)

-// Test second fragment outgoing
-err = newPacket(secondFrag, false, p)
-require.NoError(t, err)
-assert.Equal(t, netip.MustParseAddr("ff02::2"), p.LocalAddr)
-assert.Equal(t, netip.MustParseAddr("ff02::1"), p.RemoteAddr)
-assert.Equal(t, uint8(layers.IPProtocolUDP), p.Protocol)
-assert.Equal(t, uint16(0), p.LocalPort)
-assert.Equal(t, uint16(0), p.RemotePort)
-assert.True(t, p.Fragment)

-// Too short of a fragment packet
-err = newPacket(secondFrag[:len(secondFrag)-10], false, p)
-require.ErrorIs(t, err, ErrIPv6PacketTooShort)
-}

-func BenchmarkParseV6(b *testing.B) {
-// Regular UDP packet
-ip := &layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolUDP,
-HopLimit: 64,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-udp := &layers.UDP{
-SrcPort: layers.UDPPort(36123),
-DstPort: layers.UDPPort(22),
-}

-buffer := gopacket.NewSerializeBuffer()
-opts := gopacket.SerializeOptions{
-ComputeChecksums: false,
-FixLengths: true,
-}

-err := gopacket.SerializeLayers(buffer, opts, ip, udp)
-if err != nil {
-b.Fatal(err)
-}
-normalPacket := buffer.Bytes()

-// First Fragment packet
-ipFrag := &layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolIPv6Fragment,
-HopLimit: 64,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-fragHeader := []byte{
-uint8(layers.IPProtocolUDP), // Next Header (UDP)
-0x00, // Reserved
-0x00, // Fragment Offset high byte (0)
-0x01, // Fragment Offset low byte & flags (M=1)
-0x00, 0x00, 0x00, 0x01, // Identification
-}

-udpHeader := []byte{
-0x8d, 0x7b, // Source port 36123
-0x00, 0x16, // Destination port 22
-0x00, 0x00, // Length
-0x00, 0x00, // Checksum
-}

-buffer.Clear()
-err = ipFrag.SerializeTo(buffer, opts)
-if err != nil {
-b.Fatal(err)
-}

-firstFrag := buffer.Bytes()
-firstFrag = append(firstFrag, fragHeader...)
-firstFrag = append(firstFrag, udpHeader...)
-firstFrag = append(firstFrag, []byte{0xde, 0xad, 0xbe, 0xef}...)

-// Second Fragment packet
-fragHeader[2] = 0xb9 // offset 185
-buffer.Clear()
-err = ipFrag.SerializeTo(buffer, opts)
-if err != nil {
-b.Fatal(err)
-}

-secondFrag := buffer.Bytes()
-secondFrag = append(secondFrag, fragHeader...)
-secondFrag = append(secondFrag, []byte{0xde, 0xad, 0xbe, 0xef}...)

-fp := &firewall.Packet{}

-b.Run("Normal", func(b *testing.B) {
-for i := 0; i < b.N; i++ {
-if err = parseV6(normalPacket, true, fp); err != nil {
-b.Fatal(err)
-}
-}
-})

-b.Run("FirstFragment", func(b *testing.B) {
-for i := 0; i < b.N; i++ {
-if err = parseV6(firstFrag, true, fp); err != nil {
-b.Fatal(err)
-}
-}
-})

-b.Run("SecondFragment", func(b *testing.B) {
-for i := 0; i < b.N; i++ {
-if err = parseV6(secondFrag, true, fp); err != nil {
-b.Fatal(err)
-}
-}
-})

-// Evil packet
-evilPacket := &layers.IPv6{
-Version: 6,
-NextHeader: layers.IPProtocolIPv6HopByHop,
-HopLimit: 64,
-SrcIP: net.IPv6linklocalallrouters,
-DstIP: net.IPv6linklocalallnodes,
-}

-hopHeader := []byte{
-uint8(layers.IPProtocolIPv6HopByHop), // Next Header (HopByHop)
-0x00, // Length
-0x00, 0x00, // Options and padding
-0x00, 0x00, 0x00, 0x00, // More options and padding
-}

-lastHopHeader := []byte{
-uint8(layers.IPProtocolUDP), // Next Header (UDP)
-0x00, // Length
-0x00, 0x00, // Options and padding
-0x00, 0x00, 0x00, 0x00, // More options and padding
-}

-buffer.Clear()
-err = evilPacket.SerializeTo(buffer, opts)
-if err != nil {
-b.Fatal(err)
-}

-evilBytes := buffer.Bytes()
-for i := 0; i < 200; i++ {
-evilBytes = append(evilBytes, hopHeader...)
-}
-evilBytes = append(evilBytes, lastHopHeader...)
-evilBytes = append(evilBytes, udpHeader...)
-evilBytes = append(evilBytes, []byte{0xde, 0xad, 0xbe, 0xef}...)

-b.Run("200 HopByHop headers", func(b *testing.B) {
-for i := 0; i < b.N; i++ {
-if err = parseV6(evilBytes, false, fp); err != nil {
-b.Fatal(err)
-}
-}
-})
-}

-// Ensure authentication data is a multiple of 8 bytes by padding if necessary
-func padAuthData(authData []byte) []byte {
-// Length of Authentication Data must be a multiple of 8 bytes
-paddingLength := (8 - (len(authData) % 8)) % 8 // Only pad if necessary
-if paddingLength > 0 {
-authData = append(authData, make([]byte, paddingLength)...)
-}
-return authData
-}

-// Custom function to manually serialize IPSecAH for both IPv4 and IPv6
-func serializeAH(ah *layers.IPSecAH) []byte {
-buf := new(bytes.Buffer)

-// Ensure Authentication Data is a multiple of 8 bytes
-ah.AuthenticationData = padAuthData(ah.AuthenticationData)
-// Calculate Payload Length (in 32-bit words, minus 2)
-payloadLen := uint8((12+len(ah.AuthenticationData))/4) - 2

-// Serialize fields
-if err := binary.Write(buf, binary.BigEndian, ah.NextHeader); err != nil {
-panic(err)
-}
-if err := binary.Write(buf, binary.BigEndian, payloadLen); err != nil {
-panic(err)
-}
-if err := binary.Write(buf, binary.BigEndian, ah.Reserved); err != nil {
-panic(err)
-}
-if err := binary.Write(buf, binary.BigEndian, ah.SPI); err != nil {
-panic(err)
-}
-if err := binary.Write(buf, binary.BigEndian, ah.Seq); err != nil {
-panic(err)
-}
-if len(ah.AuthenticationData) > 0 {
-if err := binary.Write(buf, binary.BigEndian, ah.AuthenticationData); err != nil {
-panic(err)
-}
-}

-return buf.Bytes()
}
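One detail in the new tests that is easy to miss: serializeAH writes the AH length field as (12+len(auth))/4 - 2 32-bit words, while parseV6 skips an AH header by (lenField+2)<<2 bytes, so the two must agree on the real header size for the "A good UDP packet with an AH header" case to pass. Below is a small stand-alone consistency check of that arithmetic (it is not part of the diff), using the 8-byte AuthenticationData the test uses; the new tests and BenchmarkParseV6 otherwise run with the ordinary go test tooling.

package main

import "fmt"

// serializeAH above encodes an AH "payload length" of (12+len(auth))/4 - 2
// 32-bit words, and parseV6 advances past an AH header by (lenField+2)<<2
// bytes. For 8 bytes of auth data both should land exactly after the header.
func main() {
	authLen := 8                   // the test pads auth data to a multiple of 8; here it already is
	lenField := (12+authLen)/4 - 2 // value serializeAH writes into the length byte: 3
	skipped := (lenField + 2) << 2 // bytes parseV6 will skip: 20
	fmt.Println(lenField, skipped, 12+authLen) // prints: 3 20 20, so the parser lands right after the AH header
}
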
Some files were not shown because too many files have changed in this diff.