nebula/udp/udp_raw_linux.go
Wade Simmons 326fc8758d Support multiple UDP source ports (multiport)
The goal of this work is to send packets between two hosts using more than one
5-tuple. When running on networks like AWS, where the underlying network driver
and overlay fabric make routing, load balancing, and failover decisions based
on the flow hash, this enables more than one flow between pairs of hosts.

Multiport spreads outgoing UDP packets across multiple UDP send ports,
which allows Nebula to work around certain issues on the underlay network.
Some example issues this could work around:

- UDP rate limits applied on a per-flow basis.
- Partial underlay network failures in which some flows work and some don't.

Agreement is done during the handshake to decide if multiport mode will be
used for a given tunnel: one side must have tx_enabled set and the other
side must have rx_enabled set. For example, if host A has tx_enabled and
host B has rx_enabled, A will spread the packets it sends to B across
multiple UDP source ports.

NOTE: you cannot use multiport on a host if you are relying on UDP hole
punching to get through a NAT or firewall.

NOTE: Linux only (uses raw sockets to send). Also currently only works
with IPv4 underlay network remotes.

This is implemented by opening a raw socket and sending packets with
a source port that is based on a hash of the overlay source/destination
ports. For ICMP and Nebula metadata packets, we use a random source port.
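
To illustrate the idea, here is a hypothetical sketch of the port selection
(the helper name and FNV-1a hash are illustrative only, not the exact code in
this commit; assumes encoding/binary and hash/fnv):

    // portOffset maps an overlay flow to a stable offset in [0, txPorts),
    // so every packet of a given overlay flow leaves from the same underlay
    // source port.
    func portOffset(overlaySrcPort, overlayDstPort, txPorts uint16) uint16 {
        var b [4]byte
        binary.BigEndian.PutUint16(b[0:2], overlaySrcPort)
        binary.BigEndian.PutUint16(b[2:4], overlayDstPort)
        h := fnv.New32a()
        h.Write(b[:])
        return uint16(h.Sum32() % uint32(txPorts))
    }

The sender would then use listen.port plus the returned offset as the UDP
source port for that flow.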

Example configuration:

    multiport:
      # This host supports sending via multiple UDP ports.
      tx_enabled: false

      # This host supports receiving packets sent from multiple UDP ports.
      rx_enabled: false

      # How many UDP ports to use when sending. Source ports start at
      # listen.port and go up to (but do not include) listen.port + tx_ports.
      # For example, with listen.port 4242 and tx_ports 100, source ports
      # 4242 through 4341 would be used.
      tx_ports: 100

      # NOTE: All of your hosts must be running a version of Nebula that supports
      # multiport if you want to enable this feature. Older versions of Nebula
      # will be confused by these multiport handshakes.
      #
      # If handshakes are not getting a response, attempt to transmit handshakes
      # using random UDP source ports (to get around partial underlay network
      # failures).
      tx_handshake: false

      # How many handshakes to send without receiving a response before we
      # attempt to send multiport handshakes.
      tx_handshake_delay: 2
2022-10-17 12:58:06 -04:00

//go:build !e2e_testing
// +build !e2e_testing

package udp

import (
	"encoding/binary"
	"fmt"
	"net"
	"syscall"
	"unsafe"

	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
	"golang.org/x/net/ipv4"
	"golang.org/x/sys/unix"
)

// RawOverhead is the number of bytes that need to be reserved at the start of
// the raw bytes passed to (*RawConn).WriteTo. This is used by WriteTo to prefix
// the IP and UDP headers.
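// The 28 bytes are the 20-byte IPv4 header plus the 8-byte UDP header.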
const RawOverhead = 28
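
// RawConn is a raw IPv4 socket used to send UDP packets with a
// caller-controlled source port.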
type RawConn struct {
	sysFd    int
	basePort uint16
	l        *logrus.Logger
}
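
// NewRawConn opens a raw IPv4 socket for sending UDP packets. basePort is the
// lowest source port that will be used; the fromPort values passed to WriteTo
// are offsets added to basePort.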
func NewRawConn(l *logrus.Logger, ip string, port int, basePort uint16) (*RawConn, error) {
	syscall.ForkLock.RLock()
	// With IPPROTO_UDP, the linux kernel tries to deliver every UDP packet
	// received in the system to our socket. This constantly overflows our
	// buffer and marks our socket as having dropped packets. This makes the
	// stats on the socket useless.
	//
	// In contrast, IPPROTO_RAW is not delivered any packets and thus our read
	// buffer will not fill up and mark as having dropped packets. The only
	// difference is that we have to assemble the IP header as well, but this
	// is fairly easy since Linux does the checksum for us.
	//
	// TODO: How to get this working with Inet6 correctly? I was having issues
	// with the source address when testing before, probably need to `bind(2)`?
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_RAW, unix.IPPROTO_RAW)
	if err == nil {
		unix.CloseOnExec(fd)
	}
	syscall.ForkLock.RUnlock()
	if err != nil {
		return nil, err
	}

	// We only want to send, not recv. This will hopefully help the kernel
	// avoid wasting time on us.
	if err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, 0); err != nil {
		return nil, fmt.Errorf("unable to set SO_RCVBUF: %s", err)
	}

	// The socket is AF_INET, so bind with an IPv4 sockaddr (binding an
	// AF_INET6 sockaddr to an AF_INET socket fails with EAFNOSUPPORT).
	var lip [4]byte
	copy(lip[:], net.ParseIP(ip).To4())

	// TODO do we need to `bind(2)` so that we send from the correct address/interface?
	if err = unix.Bind(fd, &unix.SockaddrInet4{Addr: lip, Port: port}); err != nil {
		return nil, fmt.Errorf("unable to bind to socket: %s", err)
	}

	return &RawConn{
		sysFd:    fd,
		basePort: basePort,
		l:        l,
	}, nil
}

// WriteTo must be called with the first `udp.RawOverhead` bytes of raw left
// empty; WriteTo uses them to write the IP and UDP headers.
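//
// For example (illustrative; buf, payload, portOffset, and addr are
// hypothetical):
//
//	buf := make([]byte, RawOverhead+len(payload))
//	copy(buf[RawOverhead:], payload)
//	err := conn.WriteTo(buf, portOffset, addr)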
func (u *RawConn) WriteTo(raw []byte, fromPort uint16, addr *Addr) error {
	var rsa unix.RawSockaddrInet4
	rsa.Family = unix.AF_INET
	copy(rsa.Addr[:], addr.IP.To4())

	totalLen := len(raw)
	udpLen := totalLen - ipv4.HeaderLen

	// IP header
	raw[0] = byte(ipv4.Version<<4 | (ipv4.HeaderLen >> 2 & 0x0f))
	raw[1] = 0 // tos
	binary.BigEndian.PutUint16(raw[2:4], uint16(totalLen))
	binary.BigEndian.PutUint16(raw[4:6], 0)   // id (linux does it for us)
	binary.BigEndian.PutUint16(raw[6:8], 0)   // frag options
	raw[8] = byte(64)                         // ttl
	raw[9] = byte(17)                         // protocol (17 = UDP)
	binary.BigEndian.PutUint16(raw[10:12], 0) // checksum (linux does it for us)
	binary.BigEndian.PutUint32(raw[12:16], 0) // src (linux does it for us)
	copy(raw[16:20], rsa.Addr[:])             // dst

	// UDP header
	fromPort = u.basePort + fromPort
	binary.BigEndian.PutUint16(raw[20:22], fromPort)          // src port
	binary.BigEndian.PutUint16(raw[22:24], uint16(addr.Port)) // dst port
	binary.BigEndian.PutUint16(raw[24:26], uint16(udpLen))    // UDP length
	binary.BigEndian.PutUint16(raw[26:28], 0)                 // checksum (optional for IPv4)

	for {
		_, _, err := unix.Syscall6(
			unix.SYS_SENDTO,
			uintptr(u.sysFd),
			uintptr(unsafe.Pointer(&raw[0])),
			uintptr(len(raw)),
			uintptr(0),
			uintptr(unsafe.Pointer(&rsa)),
			uintptr(unix.SizeofSockaddrInet4),
		)
		if err != 0 {
			return &net.OpError{Op: "sendto", Err: err}
		}

		//TODO: handle incomplete writes
		return nil
	}
}

func (u *RawConn) ReloadConfig(c *config.C) {
	b := c.GetInt("listen.write_buffer", 0)
	if b <= 0 {
		return
	}

	if err := u.SetSendBuffer(b); err != nil {
		u.l.WithError(err).Error("Failed to set listen.write_buffer")
		return
	}

	s, err := u.GetSendBuffer()
	if err != nil {
		u.l.WithError(err).Warn("Failed to get listen.write_buffer")
		return
	}

	u.l.WithField("size", s).Info("listen.write_buffer was set")
}

func (u *RawConn) SetSendBuffer(n int) error {
	return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, n)
}

func (u *RawConn) GetSendBuffer() (int, error) {
	return unix.GetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_SNDBUF)
}
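
// getMemInfo reads the kernel's socket memory accounting counters
// (sk_meminfo) via getsockopt(SOL_SOCKET, SO_MEMINFO).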
func (u *RawConn) getMemInfo(meminfo *_SK_MEMINFO) error {
	var vallen uint32 = 4 * _SK_MEMINFO_VARS
	_, _, err := unix.Syscall6(
		unix.SYS_GETSOCKOPT,
		uintptr(u.sysFd),
		uintptr(unix.SOL_SOCKET),
		uintptr(unix.SO_MEMINFO),
		uintptr(unsafe.Pointer(meminfo)),
		uintptr(unsafe.Pointer(&vallen)),
		0,
	)
	if err != 0 {
		return err
	}
	return nil
}
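
// NewRawStatsEmitter returns a function that updates gauges with the socket's
// memory accounting stats (via SO_MEMINFO). If the kernel does not support
// SO_MEMINFO, the returned function is a no-op.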
func NewRawStatsEmitter(rawConn *RawConn) func() {
	// Check if our kernel supports SO_MEMINFO before registering the gauges
	var gauges [_SK_MEMINFO_VARS]metrics.Gauge
	var meminfo _SK_MEMINFO

	if err := rawConn.getMemInfo(&meminfo); err == nil {
		gauges = [_SK_MEMINFO_VARS]metrics.Gauge{
			metrics.GetOrRegisterGauge("raw.rmem_alloc", nil),
			metrics.GetOrRegisterGauge("raw.rcvbuf", nil),
			metrics.GetOrRegisterGauge("raw.wmem_alloc", nil),
			metrics.GetOrRegisterGauge("raw.sndbuf", nil),
			metrics.GetOrRegisterGauge("raw.fwd_alloc", nil),
			metrics.GetOrRegisterGauge("raw.wmem_queued", nil),
			metrics.GetOrRegisterGauge("raw.optmem", nil),
			metrics.GetOrRegisterGauge("raw.backlog", nil),
			metrics.GetOrRegisterGauge("raw.drops", nil),
		}
	} else {
		// return no-op because we don't support SO_MEMINFO
		return func() {}
	}

	return func() {
		if err := rawConn.getMemInfo(&meminfo); err == nil {
			for j := 0; j < _SK_MEMINFO_VARS; j++ {
				gauges[j].Update(int64(meminfo[j]))
			}
		}
	}
}