mirror of
https://github.com/slackhq/nebula.git
synced 2025-11-12 23:33:58 +01:00
The goal of this work is to send packets between two hosts using more than one
5-tuple. When running on networks like AWS where the underlying network driver
and overlay fabric makes routing, load balancing, and failover decisions based
on the flow hash, this enables more than one flow between pairs of hosts.
Multiport spreads outgoing UDP packets across multiple UDP send ports,
which allows nebula to work around any issues on the underlay network.
Some example issues this could work around:
- UDP rate limits on a per flow basis.
- Partial underlay network failure in which some flows work and some don't
Agreement is done during the handshake to decide if multiport mode will
be used for a given tunnel (one side must have tx_enabled set, the other
side must have rx_enabled set)
NOTE: you cannot use multiport on a host if you are relying on UDP hole
punching to get through a NAT or firewall.
NOTE: Linux only (uses raw sockets to send). Also currently only works
with IPv4 underlay network remotes.
This is implemented by opening a raw socket and sending packets with
a source port that is based on a hash of the overlay source/destination
port. For ICMP and Nebula metadata packets, we use a random source port.
Example configuration:
multiport:
# This host supports sending via multiple UDP ports.
tx_enabled: false
# This host supports receiving packets sent from multiple UDP ports.
rx_enabled: false
# How many UDP ports to use when sending. The lowest source port will be
# listen.port and go up to (but not including) listen.port + tx_ports.
tx_ports: 100
# NOTE: All of your hosts must be running a version of Nebula that supports
# multiport if you want to enable this feature. Older versions of Nebula
# will be confused by these multiport handshakes.
#
# If handshakes are not getting a response, attempt to transmit handshakes
# using random UDP source ports (to get around partial underlay network
# failures).
tx_handshake: false
# How many unresponded handshakes we should send before we attempt to
# send multiport handshakes.
tx_handshake_delay: 2
96 lines
2.7 KiB
Bash
Executable File
96 lines
2.7 KiB
Bash
Executable File
#!/bin/bash
# Smoke test for nebula: boots a lighthouse and three hosts in Docker and
# verifies overlay connectivity, firewall rules, conntrack behavior, and
# clean shutdown on SIGTERM.

# Exit on first error and trace every command for easier debugging.
set -e -x

# A failure in any stage of a pipeline (e.g. docker | tee | sed) fails the
# whole pipeline, so errors inside the logging pipelines are not masked.
set -o pipefail

# Per-container output is tee'd into this directory.
mkdir -p logs
|
|
|
|
# Kill any containers that are still running when the script exits, whether it
# finished cleanly or failed partway through. Registered via `trap ... EXIT`.
cleanup() {
    echo
    echo " *** cleanup"
    echo

    # Best effort from here on: a failed `docker kill` (e.g. a container that
    # already exited) must not mask the script's real exit status.
    set +e
    # Only attempt the kill if we still have running background jobs.
    if [ "$(jobs -r)" ]
    then
        sudo docker kill lighthouse1 host2 host3 host4
    fi
}

trap cleanup EXIT
|
|
|
|
# Image tag under test; override the suffix with NAME (defaults to "smoke").
CONTAINER="nebula:${NAME:-smoke}"

# Validate each node's config with nebula's -test flag before booting anything;
# --rm ensures these short-lived containers don't collide with the real runs below.
sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
|
|
|
|
# Boot the lighthouse first, then each host, staggered by one second so each
# node can register with the lighthouse before the next comes up. Each run
# needs the tun device and NET_ADMIN to create the overlay interface. Output
# is logged to logs/<name> and echoed with a per-node prefix; sed -u keeps the
# interleaved output line-buffered.
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1
|
|
|
|
# Banner without trace noise, then re-enable tracing for the actual tests.
set +x
echo
echo " *** Testing ping from lighthouse1"
echo
set -x
# The lighthouse should reach both host2 and host3 over the overlay.
sudo docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3
|
|
|
|
set +x
echo
echo " *** Testing ping from host2"
echo
set -x
sudo docker exec host2 ping -c1 192.168.100.1
# Should fail because not allowed by host3's inbound firewall. `set -e` does
# not abort on a `!`-negated command, so the explicit `|| exit 1` is required
# to fail the script if the ping unexpectedly succeeds.
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
|
|
|
set +x
echo
echo " *** Testing ping from host3"
echo
set -x
# host3's outbound traffic is unrestricted; both peers should answer.
sudo docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2
|
|
|
|
set +x
echo
echo " *** Testing ping from host4"
echo
set -x
sudo docker exec host4 ping -c1 192.168.100.1
# Should fail because not allowed by host4's outbound firewall. As above, the
# `|| exit 1` is needed since `set -e` ignores `!`-negated commands.
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
|
|
|
|
set +x
echo
echo " *** Testing conntrack"
echo
set -x
# host2 can ping host3 now that host3 pinged it first (the earlier host3->host2
# ping established conntrack state that permits the reply direction).
sudo docker exec host2 ping -c1 192.168.100.3
# host4 can ping host2 once conntrack is established by host2 pinging host4 first.
sudo docker exec host2 ping -c1 192.168.100.4
sudo docker exec host4 ping -c1 192.168.100.2
|
|
|
|
# Ask each nebula process (PID 1 inside its container) to shut down gracefully
# via SIGTERM, hosts first and the lighthouse last.
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
# Give the processes a moment to exit before checking.
sleep 1

# Every background docker-run job should have terminated by now; anything still
# running means nebula ignored SIGTERM, which is a test failure.
if [ "$(jobs -r)" ]
then
    echo "nebula still running after SIGTERM sent" >&2
    exit 1
fi
|