GSO again
JackDoan
2026-04-17 10:25:05 -05:00
parent a476b1fa07
commit 194d58cd46
14 changed files with 2746 additions and 16 deletions

batch.go Normal file

@@ -0,0 +1,70 @@
package nebula
import "net/netip"
// sendBatchCap is the maximum number of encrypted packets accumulated before a
// flush is forced. TSO superpackets segment to at most ~45 packets on
// reasonable MTUs, so 128 leaves headroom without bloating the backing
// allocation.
const sendBatchCap = 128
// sendBatch accumulates encrypted UDP packets for a single sendmmsg flush.
// One sendBatch is owned by each listenIn goroutine; no locking is needed.
// The backing storage holds up to batchCap packets of slotCap bytes each;
// bufs and dsts are parallel slices of committed slots.
type sendBatch struct {
bufs [][]byte
dsts []netip.AddrPort
backing []byte
slotCap int
batchCap int
nextSlot int
}
func newSendBatch(batchCap, slotCap int) *sendBatch {
return &sendBatch{
bufs: make([][]byte, 0, batchCap),
dsts: make([]netip.AddrPort, 0, batchCap),
backing: make([]byte, batchCap*slotCap),
slotCap: slotCap,
batchCap: batchCap,
}
}
// Next returns a zero-length slice with slotCap capacity over the next unused
// slot's backing bytes. The caller writes into the returned slice and then
// calls Commit with the final length and destination. Next returns nil when
// the batch is full.
func (b *sendBatch) Next() []byte {
if b.nextSlot >= b.batchCap {
return nil
}
start := b.nextSlot * b.slotCap
return b.backing[start : start : start+b.slotCap]
}
// Commit records the slot just returned by Next as a packet of length n
// destined for dst.
func (b *sendBatch) Commit(n int, dst netip.AddrPort) {
start := b.nextSlot * b.slotCap
b.bufs = append(b.bufs, b.backing[start:start+n])
b.dsts = append(b.dsts, dst)
b.nextSlot++
}
// Reset clears committed slots; backing storage is retained for reuse.
func (b *sendBatch) Reset() {
b.bufs = b.bufs[:0]
b.dsts = b.dsts[:0]
b.nextSlot = 0
}
// Len returns the number of committed packets.
func (b *sendBatch) Len() int {
return len(b.bufs)
}
// Cap returns the maximum number of slots in the batch.
func (b *sendBatch) Cap() int {
return b.batchCap
}
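// Illustrative usage sketch (not part of this change): the Next/Commit/Reset
// cycle a listenIn goroutine is expected to follow. encryptInto, dstFor and
// flushSendmmsg are hypothetical stand-ins for the real encrypt and sendmmsg
// steps; 1500 is an arbitrary example slot size.
//
//	b := newSendBatch(sendBatchCap, 1500)
//	for _, pkt := range plaintexts {
//		slot := b.Next()
//		if slot == nil { // batch full: flush, then reuse the backing storage
//			flushSendmmsg(b.bufs, b.dsts)
//			b.Reset()
//			slot = b.Next()
//		}
//		out := encryptInto(slot, pkt) // ciphertext appended into the slot
//		b.Commit(len(out), dstFor(pkt))
//	}
//	if b.Len() > 0 {
//		flushSendmmsg(b.bufs, b.dsts)
//		b.Reset()
//	}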

batch_test.go Normal file

@@ -0,0 +1,69 @@
package nebula
import (
"net/netip"
"testing"
)
func TestSendBatchBookkeeping(t *testing.T) {
b := newSendBatch(4, 32)
if b.Len() != 0 || b.Cap() != 4 {
t.Fatalf("fresh batch: len=%d cap=%d", b.Len(), b.Cap())
}
ap := netip.MustParseAddrPort("10.0.0.1:4242")
for i := 0; i < 4; i++ {
slot := b.Next()
if slot == nil {
t.Fatalf("slot %d: Next returned nil before cap", i)
}
if cap(slot) != 32 || len(slot) != 0 {
t.Fatalf("slot %d: got len=%d cap=%d want len=0 cap=32", i, len(slot), cap(slot))
}
// Write a marker byte.
slot = append(slot, byte(i), byte(i+1), byte(i+2))
b.Commit(len(slot), ap)
}
if b.Next() != nil {
t.Fatalf("Next should return nil when full")
}
if b.Len() != 4 {
t.Fatalf("Len=%d want 4", b.Len())
}
for i, buf := range b.bufs {
if len(buf) != 3 || buf[0] != byte(i) {
t.Errorf("buf %d: %x", i, buf)
}
if b.dsts[i] != ap {
t.Errorf("dst %d: got %v want %v", i, b.dsts[i], ap)
}
}
// Reset returns empty and Next works again.
b.Reset()
if b.Len() != 0 {
t.Fatalf("after Reset Len=%d want 0", b.Len())
}
slot := b.Next()
if slot == nil || cap(slot) != 32 {
t.Fatalf("after Reset Next nil or wrong cap: %v cap=%d", slot == nil, cap(slot))
}
}
func TestSendBatchSlotsDoNotOverlap(t *testing.T) {
b := newSendBatch(3, 8)
ap := netip.MustParseAddrPort("10.0.0.1:80")
// Fill three slots, each with its own sentinel byte.
for i := 0; i < 3; i++ {
s := b.Next()
s = append(s, byte(0xA0+i), byte(0xB0+i))
b.Commit(len(s), ap)
}
for i, buf := range b.bufs {
if buf[0] != byte(0xA0+i) || buf[1] != byte(0xB0+i) {
t.Errorf("slot %d corrupted: %x", i, buf)
}
}
}


@@ -0,0 +1,484 @@
package coalesce
import (
"bytes"
"encoding/binary"
"io"
"github.com/slackhq/nebula/overlay/tio"
)
// ipProtoTCP is the IANA protocol number for TCP. Hardcoded instead of
// reaching for golang.org/x/sys/unix — that package doesn't define the
// constant on Windows, which would break cross-compiles even though this
// file runs unchanged on every platform.
const ipProtoTCP = 6
// tcpCoalesceBufSize caps total bytes per superpacket. Mirrors the kernel's
// sk_gso_max_size of ~64KiB; anything beyond this would be rejected anyway.
const tcpCoalesceBufSize = 65535
// tcpCoalesceMaxSegs caps how many segments we'll coalesce into a single
// superpacket. Keeping this well below the kernel's TSO ceiling bounds
// latency.
const tcpCoalesceMaxSegs = 64
// tcpCoalesceHdrCap is the scratch space we copy a seed's IP+TCP header
// into. IPv6 (40) + TCP with full options (60) = 100 bytes.
const tcpCoalesceHdrCap = 100
// initialSlots is the starting capacity of the slot pool. One flow per
// packet is the worst case so this matches a typical UDP recvmmsg batch.
const initialSlots = 64
// flowKey identifies a TCP flow by {src, dst, sport, dport, family}.
// Comparable, so linear scans over the slot list stay tight.
type flowKey struct {
src, dst [16]byte
sport, dport uint16
isV6 bool
}
// coalesceSlot is one entry in the coalescer's ordered event queue. When
// passthrough is true the slot holds a single borrowed packet that must be
// emitted verbatim (non-TCP, non-admissible TCP, or oversize seed). When
// passthrough is false the slot is an in-progress coalesced superpacket:
// hdrBuf is a mutable copy of the seed's IP+TCP header (we patch total
// length and pseudo-header partial at flush), and payIovs are *borrowed*
// slices from the caller's plaintext buffers — no payload is ever copied.
// The caller (listenOut) must keep those buffers alive until Flush.
type coalesceSlot struct {
passthrough bool
rawPkt []byte // borrowed when passthrough
fk flowKey
hdrBuf [tcpCoalesceHdrCap]byte
hdrLen int
ipHdrLen int
isV6 bool
gsoSize int
numSeg int
totalPay int
nextSeq uint32
// psh closes the chain: set when the last-accepted segment had PSH or
// was sub-gsoSize. No further appends after that.
psh bool
payIovs [][]byte
}
// TCPCoalescer accumulates adjacent in-flow TCP data segments across
// multiple concurrent flows and emits each flow's run as a single TSO
// superpacket via tio.GSOWriter. All output — coalesced or not — is
// deferred until Flush so arrival order is preserved on the wire. Owns
// no locks; one coalescer per TUN write queue.
type TCPCoalescer struct {
plainW io.Writer
gsoW tio.GSOWriter // nil when the queue doesn't support TSO
// slots is the ordered event queue. Flush walks it once and emits each
// entry as either a WriteGSO (coalesced) or a plainW.Write (passthrough).
slots []*coalesceSlot
// openSlots maps a flow key to its most recent non-sealed slot, so new
// segments can extend an in-progress superpacket in O(1). Slots are
// removed from this map when they close (PSH or short-last-segment),
// when a non-admissible packet for that flow arrives, or in Flush.
openSlots map[flowKey]*coalesceSlot
pool []*coalesceSlot // free list for reuse
}
func NewTCPCoalescer(w io.Writer) *TCPCoalescer {
c := &TCPCoalescer{
plainW: w,
slots: make([]*coalesceSlot, 0, initialSlots),
openSlots: make(map[flowKey]*coalesceSlot, initialSlots),
pool: make([]*coalesceSlot, 0, initialSlots),
}
if gw, ok := w.(tio.GSOWriter); ok && gw.GSOSupported() {
c.gsoW = gw
}
return c
}
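// Illustrative usage sketch (assumption, not taken from this change): one
// coalescer per TUN write queue, fed a batch of decrypted packets whose
// buffers stay alive until Flush. tunQueue and decrypted are hypothetical
// names.
//
//	c := NewTCPCoalescer(tunQueue)
//	for _, pkt := range decrypted {
//		if err := c.Add(pkt); err != nil {
//			return err
//		}
//	}
//	return c.Flush() // superpackets and passthroughs, in arrival order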
// parsedTCP holds the fields extracted from a single parse so later steps
// (admission, slot lookup, canAppend) don't re-walk the header.
type parsedTCP struct {
fk flowKey
ipHdrLen int
tcpHdrLen int
hdrLen int
payLen int
seq uint32
flags byte
}
// parseTCPBase extracts the flow key and IP/TCP offsets for any TCP packet,
// regardless of whether it's admissible for coalescing. Returns ok=false
// for non-TCP or malformed input. Accepts IPv4 (no options, no fragmentation)
// and IPv6 (no extension headers).
func parseTCPBase(pkt []byte) (parsedTCP, bool) {
var p parsedTCP
if len(pkt) < 20 {
return p, false
}
v := pkt[0] >> 4
switch v {
case 4:
ihl := int(pkt[0]&0x0f) * 4
if ihl != 20 {
return p, false
}
if pkt[9] != ipProtoTCP {
return p, false
}
// Reject actual fragmentation (MF or non-zero frag offset).
if binary.BigEndian.Uint16(pkt[6:8])&0x3fff != 0 {
return p, false
}
totalLen := int(binary.BigEndian.Uint16(pkt[2:4]))
if totalLen > len(pkt) || totalLen < ihl {
return p, false
}
p.ipHdrLen = 20
p.fk.isV6 = false
copy(p.fk.src[:4], pkt[12:16])
copy(p.fk.dst[:4], pkt[16:20])
pkt = pkt[:totalLen]
case 6:
if len(pkt) < 40 {
return p, false
}
if pkt[6] != ipProtoTCP {
return p, false
}
payloadLen := int(binary.BigEndian.Uint16(pkt[4:6]))
if 40+payloadLen > len(pkt) {
return p, false
}
p.ipHdrLen = 40
p.fk.isV6 = true
copy(p.fk.src[:], pkt[8:24])
copy(p.fk.dst[:], pkt[24:40])
pkt = pkt[:40+payloadLen]
default:
return p, false
}
if len(pkt) < p.ipHdrLen+20 {
return p, false
}
tcpOff := int(pkt[p.ipHdrLen+12]>>4) * 4
if tcpOff < 20 || tcpOff > 60 {
return p, false
}
if len(pkt) < p.ipHdrLen+tcpOff {
return p, false
}
p.tcpHdrLen = tcpOff
p.hdrLen = p.ipHdrLen + tcpOff
p.payLen = len(pkt) - p.hdrLen
p.seq = binary.BigEndian.Uint32(pkt[p.ipHdrLen+4 : p.ipHdrLen+8])
p.flags = pkt[p.ipHdrLen+13]
p.fk.sport = binary.BigEndian.Uint16(pkt[p.ipHdrLen : p.ipHdrLen+2])
p.fk.dport = binary.BigEndian.Uint16(pkt[p.ipHdrLen+2 : p.ipHdrLen+4])
return p, true
}
// coalesceable reports whether a parsed TCP segment is eligible for
// coalescing. Accepts only ACK or ACK|PSH with a non-empty payload.
func (p parsedTCP) coalesceable() bool {
const ack = 0x10
const psh = 0x08
if p.flags&^(ack|psh) != 0 || p.flags&ack == 0 {
return false
}
return p.payLen > 0
}
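// For example (illustrative): flags 0x10 (ACK) and 0x18 (ACK|PSH) pass the
// mask check above, while 0x12 (SYN|ACK) fails because SYN survives
// flags&^(ack|psh), and a bare 0x08 (PSH without ACK) fails the ACK check.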
// Add borrows pkt. The caller must keep pkt valid until the next Flush,
// whether or not the packet was coalesced — passthrough (non-admissible)
// packets are queued and written at Flush time, not synchronously.
func (c *TCPCoalescer) Add(pkt []byte) error {
if c.gsoW == nil {
c.addPassthrough(pkt)
return nil
}
info, ok := parseTCPBase(pkt)
if !ok {
// Non-TCP or malformed — can't possibly collide with an open flow.
c.addPassthrough(pkt)
return nil
}
if !info.coalesceable() {
// TCP but not admissible (SYN/FIN/RST/URG/CWR/ECE or zero-payload).
// Seal this flow's open slot so later in-flow packets don't extend
// it and accidentally reorder past this passthrough.
delete(c.openSlots, info.fk)
c.addPassthrough(pkt)
return nil
}
if open := c.openSlots[info.fk]; open != nil {
if c.canAppend(open, pkt, info) {
c.appendPayload(open, pkt, info)
if open.psh {
delete(c.openSlots, info.fk)
}
return nil
}
// Can't extend — seal it and fall through to seed a fresh slot.
delete(c.openSlots, info.fk)
}
c.seed(pkt, info)
return nil
}
// Flush emits every queued event in arrival order. Coalesced slots go out
// via WriteGSO; passthrough slots go out via plainW.Write. Returns the
// first error observed; keeps draining so one bad packet doesn't hold up
// the rest. After Flush returns, borrowed payload slices may be recycled.
func (c *TCPCoalescer) Flush() error {
var first error
for _, s := range c.slots {
var err error
if s.passthrough {
_, err = c.plainW.Write(s.rawPkt)
} else {
err = c.flushSlot(s)
}
if err != nil && first == nil {
first = err
}
c.release(s)
}
for i := range c.slots {
c.slots[i] = nil
}
c.slots = c.slots[:0]
for k := range c.openSlots {
delete(c.openSlots, k)
}
return first
}
func (c *TCPCoalescer) addPassthrough(pkt []byte) {
s := c.take()
s.passthrough = true
s.rawPkt = pkt
c.slots = append(c.slots, s)
}
func (c *TCPCoalescer) seed(pkt []byte, info parsedTCP) {
if info.hdrLen > tcpCoalesceHdrCap || info.hdrLen+info.payLen > tcpCoalesceBufSize {
// Pathological shape — can't fit our scratch, emit as-is.
c.addPassthrough(pkt)
return
}
s := c.take()
s.passthrough = false
s.rawPkt = nil
copy(s.hdrBuf[:], pkt[:info.hdrLen])
s.hdrLen = info.hdrLen
s.ipHdrLen = info.ipHdrLen
s.isV6 = info.fk.isV6
s.fk = info.fk
s.gsoSize = info.payLen
s.numSeg = 1
s.totalPay = info.payLen
s.nextSeq = info.seq + uint32(info.payLen)
s.psh = info.flags&0x08 != 0
s.payIovs = append(s.payIovs[:0], pkt[info.hdrLen:info.hdrLen+info.payLen])
c.slots = append(c.slots, s)
if !s.psh {
c.openSlots[info.fk] = s
}
}
// canAppend reports whether info's packet extends the slot's seed: same
// header shape and stable contents, adjacent seq, not oversized, chain not
// closed.
func (c *TCPCoalescer) canAppend(s *coalesceSlot, pkt []byte, info parsedTCP) bool {
if s.psh {
return false
}
if info.hdrLen != s.hdrLen {
return false
}
if info.seq != s.nextSeq {
return false
}
if s.numSeg >= tcpCoalesceMaxSegs {
return false
}
if info.payLen > s.gsoSize {
return false
}
if s.hdrLen+s.totalPay+info.payLen > tcpCoalesceBufSize {
return false
}
if !headersMatch(s.hdrBuf[:s.hdrLen], pkt[:info.hdrLen], s.isV6, s.ipHdrLen) {
return false
}
return true
}
func (c *TCPCoalescer) appendPayload(s *coalesceSlot, pkt []byte, info parsedTCP) {
s.payIovs = append(s.payIovs, pkt[info.hdrLen:info.hdrLen+info.payLen])
s.numSeg++
s.totalPay += info.payLen
s.nextSeq = info.seq + uint32(info.payLen)
if info.payLen < s.gsoSize || info.flags&0x08 != 0 {
s.psh = true
}
}
func (c *TCPCoalescer) take() *coalesceSlot {
if n := len(c.pool); n > 0 {
s := c.pool[n-1]
c.pool[n-1] = nil
c.pool = c.pool[:n-1]
return s
}
return &coalesceSlot{}
}
func (c *TCPCoalescer) release(s *coalesceSlot) {
s.passthrough = false
s.rawPkt = nil
for i := range s.payIovs {
s.payIovs[i] = nil
}
s.payIovs = s.payIovs[:0]
s.numSeg = 0
s.totalPay = 0
s.psh = false
c.pool = append(c.pool, s)
}
// flushSlot patches the header and calls WriteGSO. Does not remove the
// slot from c.slots.
func (c *TCPCoalescer) flushSlot(s *coalesceSlot) error {
total := s.hdrLen + s.totalPay
l4Len := total - s.ipHdrLen
hdr := s.hdrBuf[:s.hdrLen]
if s.isV6 {
binary.BigEndian.PutUint16(hdr[4:6], uint16(l4Len))
} else {
binary.BigEndian.PutUint16(hdr[2:4], uint16(total))
hdr[10] = 0
hdr[11] = 0
binary.BigEndian.PutUint16(hdr[10:12], ipv4HdrChecksum(hdr[:s.ipHdrLen]))
}
var psum uint32
if s.isV6 {
psum = pseudoSumIPv6(hdr[8:24], hdr[24:40], ipProtoTCP, l4Len)
} else {
psum = pseudoSumIPv4(hdr[12:16], hdr[16:20], ipProtoTCP, l4Len)
}
tcsum := s.ipHdrLen + 16
binary.BigEndian.PutUint16(hdr[tcsum:tcsum+2], foldOnceNoInvert(psum))
return c.gsoW.WriteGSO(hdr, s.payIovs, uint16(s.gsoSize), s.isV6, uint16(s.ipHdrLen))
}
// headersMatch compares two IP+TCP header prefixes for byte-for-byte
// equality on every field that must be identical across coalesced
// segments. Size/IPID/IPCsum/seq/flags/tcpCsum are masked out.
func headersMatch(a, b []byte, isV6 bool, ipHdrLen int) bool {
if len(a) != len(b) {
return false
}
if isV6 {
// IPv6: bytes [0:4] = version/TC/flow-label, [6:8] = next_hdr/hop,
// [8:40] = src+dst. Skip [4:6] payload length.
if !bytes.Equal(a[0:4], b[0:4]) {
return false
}
if !bytes.Equal(a[6:40], b[6:40]) {
return false
}
} else {
// IPv4: [0:2] version/IHL/TOS, [6:10] flags/fragoff/TTL/proto,
// [12:20] src+dst. Skip [2:4] total len, [4:6] id, [10:12] csum.
if !bytes.Equal(a[0:2], b[0:2]) {
return false
}
if !bytes.Equal(a[6:10], b[6:10]) {
return false
}
if !bytes.Equal(a[12:20], b[12:20]) {
return false
}
}
// TCP: compare [0:4] ports, [8:13] ack+dataoff, [14:16] window,
// [18:tcpHdrLen] options (incl. urgent).
tcp := ipHdrLen
if !bytes.Equal(a[tcp:tcp+4], b[tcp:tcp+4]) {
return false
}
if !bytes.Equal(a[tcp+8:tcp+13], b[tcp+8:tcp+13]) {
return false
}
if !bytes.Equal(a[tcp+14:tcp+16], b[tcp+14:tcp+16]) {
return false
}
if !bytes.Equal(a[tcp+18:], b[tcp+18:]) {
return false
}
return true
}
// ipv4HdrChecksum computes the IPv4 header checksum over hdr (which must
// already have its checksum field zeroed) and returns the folded/inverted
// 16-bit value to store.
func ipv4HdrChecksum(hdr []byte) uint16 {
var sum uint32
for i := 0; i+1 < len(hdr); i += 2 {
sum += uint32(binary.BigEndian.Uint16(hdr[i : i+2]))
}
if len(hdr)%2 == 1 {
sum += uint32(hdr[len(hdr)-1]) << 8
}
for sum>>16 != 0 {
sum = (sum & 0xffff) + (sum >> 16)
}
return ^uint16(sum)
}
// pseudoSumIPv4 / pseudoSumIPv6 build the TCP pseudo-header partial sum
// expected by the virtio NEEDS_CSUM kernel path: the 32-bit accumulator
// before folding.
func pseudoSumIPv4(src, dst []byte, proto byte, l4Len int) uint32 {
var sum uint32
sum += uint32(binary.BigEndian.Uint16(src[0:2]))
sum += uint32(binary.BigEndian.Uint16(src[2:4]))
sum += uint32(binary.BigEndian.Uint16(dst[0:2]))
sum += uint32(binary.BigEndian.Uint16(dst[2:4]))
sum += uint32(proto)
sum += uint32(l4Len)
return sum
}
func pseudoSumIPv6(src, dst []byte, proto byte, l4Len int) uint32 {
var sum uint32
for i := 0; i < 16; i += 2 {
sum += uint32(binary.BigEndian.Uint16(src[i : i+2]))
sum += uint32(binary.BigEndian.Uint16(dst[i : i+2]))
}
sum += uint32(l4Len >> 16)
sum += uint32(l4Len & 0xffff)
sum += uint32(proto)
return sum
}
// foldOnceNoInvert folds the 32-bit accumulator to 16 bits and returns it
// unchanged (no one's complement). This is what virtio NEEDS_CSUM wants in
// the L4 checksum field — the kernel will add the payload sum and invert.
func foldOnceNoInvert(sum uint32) uint16 {
for sum>>16 != 0 {
sum = (sum & 0xffff) + (sum >> 16)
}
return uint16(sum)
}
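// Worked example (illustrative, values chosen for clarity): for src 10.0.0.1,
// dst 10.0.0.2 and l4Len 40, pseudoSumIPv4 accumulates
// 0x0a00 + 0x0001 + 0x0a00 + 0x0002 + 6 + 40 = 0x1431. foldOnceNoInvert(0x1431)
// is 0x1431 (no carry to fold), which flushSlot stores in the TCP checksum
// field; the kernel then completes the sum from csum_start onward and inverts
// to produce the on-wire checksum.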


@@ -0,0 +1,576 @@
package coalesce
import (
"encoding/binary"
"testing"
)
// fakeTunWriter records plain Writes and WriteGSO calls without touching a
// real TUN fd. WriteGSO preserves the split between hdr and borrowed pays
// so tests can inspect each independently.
type fakeTunWriter struct {
gsoEnabled bool
writes [][]byte
gsoWrites []fakeGSOWrite
}
type fakeGSOWrite struct {
hdr []byte
pays [][]byte
gsoSize uint16
isV6 bool
csumStart uint16
}
// total returns hdrLen + sum of pay lens.
func (g fakeGSOWrite) total() int {
n := len(g.hdr)
for _, p := range g.pays {
n += len(p)
}
return n
}
// payLen sums the pays.
func (g fakeGSOWrite) payLen() int {
var n int
for _, p := range g.pays {
n += len(p)
}
return n
}
func (w *fakeTunWriter) Write(p []byte) (int, error) {
buf := make([]byte, len(p))
copy(buf, p)
w.writes = append(w.writes, buf)
return len(p), nil
}
func (w *fakeTunWriter) WriteGSO(hdr []byte, pays [][]byte, gsoSize uint16, isV6 bool, csumStart uint16) error {
hcopy := make([]byte, len(hdr))
copy(hcopy, hdr)
paysCopy := make([][]byte, len(pays))
for i, p := range pays {
pc := make([]byte, len(p))
copy(pc, p)
paysCopy[i] = pc
}
w.gsoWrites = append(w.gsoWrites, fakeGSOWrite{
hdr: hcopy,
pays: paysCopy,
gsoSize: gsoSize,
isV6: isV6,
csumStart: csumStart,
})
return nil
}
func (w *fakeTunWriter) GSOSupported() bool { return w.gsoEnabled }
// buildTCPv4 constructs a minimal IPv4+TCP packet with the given payload,
// seq, and flags. Assumes no IP options and a 20-byte TCP header.
func buildTCPv4(seq uint32, flags byte, payload []byte) []byte {
return buildTCPv4Ports(1000, 2000, seq, flags, payload)
}
// buildTCPv4Ports is buildTCPv4 with caller-specified ports so tests can
// build distinct flows.
func buildTCPv4Ports(sport, dport uint16, seq uint32, flags byte, payload []byte) []byte {
const ipHdrLen = 20
const tcpHdrLen = 20
total := ipHdrLen + tcpHdrLen + len(payload)
pkt := make([]byte, total)
pkt[0] = 0x45
pkt[1] = 0x00
binary.BigEndian.PutUint16(pkt[2:4], uint16(total))
binary.BigEndian.PutUint16(pkt[4:6], 0)
binary.BigEndian.PutUint16(pkt[6:8], 0x4000)
pkt[8] = 64
pkt[9] = ipProtoTCP
copy(pkt[12:16], []byte{10, 0, 0, 1})
copy(pkt[16:20], []byte{10, 0, 0, 2})
binary.BigEndian.PutUint16(pkt[20:22], sport)
binary.BigEndian.PutUint16(pkt[22:24], dport)
binary.BigEndian.PutUint32(pkt[24:28], seq)
binary.BigEndian.PutUint32(pkt[28:32], 12345)
pkt[32] = 0x50
pkt[33] = flags
binary.BigEndian.PutUint16(pkt[34:36], 0xffff)
copy(pkt[40:], payload)
return pkt
}
const (
tcpAck = 0x10
tcpPsh = 0x08
tcpSyn = 0x02
tcpFin = 0x01
tcpAckPsh = tcpAck | tcpPsh
)
func TestCoalescerPassthroughWhenGSOUnavailable(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: false}
c := NewTCPCoalescer(w)
pkt := buildTCPv4(1000, tcpAck, []byte("hello"))
if err := c.Add(pkt); err != nil {
t.Fatal(err)
}
// No sync write — passthrough is deferred to Flush.
if len(w.writes) != 0 || len(w.gsoWrites) != 0 {
t.Fatalf("no Add-time writes: got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
if len(w.writes) != 1 || len(w.gsoWrites) != 0 {
t.Fatalf("want single plain write, got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
}
func TestCoalescerNonTCPPassthrough(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pkt := make([]byte, 28)
pkt[0] = 0x45
binary.BigEndian.PutUint16(pkt[2:4], 28)
pkt[9] = 1
copy(pkt[12:16], []byte{10, 0, 0, 1})
copy(pkt[16:20], []byte{10, 0, 0, 2})
if err := c.Add(pkt); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
if len(w.writes) != 1 || len(w.gsoWrites) != 0 {
t.Fatalf("ICMP should pass through unchanged")
}
}
func TestCoalescerSeedThenFlushAlone(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pkt := buildTCPv4(1000, tcpAck, make([]byte, 1000))
if err := c.Add(pkt); err != nil {
t.Fatal(err)
}
if len(w.writes) != 0 || len(w.gsoWrites) != 0 {
t.Fatalf("unexpected output before flush")
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// Single-segment flush now goes through WriteGSO with GSO_NONE
// (virtio NEEDS_CSUM lets the kernel fill in the L4 csum).
if len(w.gsoWrites) != 1 || len(w.writes) != 0 {
t.Fatalf("single-seg flush: writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
g := w.gsoWrites[0]
if g.total() != 40+1000 {
t.Errorf("super total=%d want %d", g.total(), 40+1000)
}
if g.payLen() != 1000 {
t.Errorf("payLen=%d want 1000", g.payLen())
}
}
func TestCoalescerCoalescesAdjacentACKs(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
if err := c.Add(buildTCPv4(1000, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4(2200, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4(3400, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
if len(w.gsoWrites) != 1 {
t.Fatalf("want 1 gso write, got %d (plain=%d)", len(w.gsoWrites), len(w.writes))
}
g := w.gsoWrites[0]
if g.gsoSize != 1200 {
t.Errorf("gsoSize=%d want 1200", g.gsoSize)
}
if len(g.hdr) != 40 {
t.Errorf("hdrLen=%d want 40", len(g.hdr))
}
if g.csumStart != 20 {
t.Errorf("csumStart=%d want 20", g.csumStart)
}
if len(g.pays) != 3 {
t.Errorf("pay count=%d want 3", len(g.pays))
}
if g.total() != 40+3*1200 {
t.Errorf("superpacket len=%d want %d", g.total(), 40+3*1200)
}
if tot := binary.BigEndian.Uint16(g.hdr[2:4]); int(tot) != g.total() {
t.Errorf("ip total_length=%d want %d", tot, g.total())
}
}
func TestCoalescerRejectsSeqGap(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
if err := c.Add(buildTCPv4(1000, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4(3000, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// Each packet flushes as its own single-segment WriteGSO now.
if len(w.gsoWrites) != 2 || len(w.writes) != 0 {
t.Fatalf("seq gap: want 2 gso writes got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
}
func TestCoalescerRejectsFlagMismatch(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
if err := c.Add(buildTCPv4(1000, tcpAck, pay)); err != nil {
t.Fatal(err)
}
// SYN|ACK is non-admissible. Must flush matching flow's slot (gso)
// and then plain-write the SYN packet itself.
syn := buildTCPv4(2200, tcpSyn|tcpAck, pay)
if err := c.Add(syn); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
if len(w.writes) != 1 || len(w.gsoWrites) != 1 {
t.Fatalf("flag mismatch: want 1 plain + 1 gso, got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
}
func TestCoalescerRejectsFIN(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
fin := buildTCPv4(1000, tcpAck|tcpFin, []byte("x"))
if err := c.Add(fin); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// FIN isn't admissible — passthrough as plain, no slot, no gso.
if len(w.writes) != 1 || len(w.gsoWrites) != 0 {
t.Fatalf("FIN should be passthrough, got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
}
func TestCoalescerShortLastSegmentClosesChain(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
full := make([]byte, 1200)
half := make([]byte, 500)
if err := c.Add(buildTCPv4(1000, tcpAck, full)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4(2200, tcpAck, half)); err != nil {
t.Fatal(err)
}
// Chain now closed; next packet seeds a new slot on the same flow
// after flushing the old one.
if err := c.Add(buildTCPv4(2700, tcpAck, full)); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// Expect two gso writes: the first two packets coalesced, then the
// third flushed alone (single-seg via GSO_NONE).
if len(w.gsoWrites) != 2 {
t.Fatalf("want 2 gso writes got %d", len(w.gsoWrites))
}
if len(w.writes) != 0 {
t.Fatalf("want 0 plain writes got %d", len(w.writes))
}
if w.gsoWrites[0].gsoSize != 1200 {
t.Errorf("gsoSize=%d want 1200", w.gsoWrites[0].gsoSize)
}
if got, want := w.gsoWrites[0].total(), 40+1200+500; got != want {
t.Errorf("super len=%d want %d", got, want)
}
}
func TestCoalescerPSHFinalizesChain(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
if err := c.Add(buildTCPv4(1000, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4(2200, tcpAckPsh, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4(3400, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// First two coalesce; the third seeds a fresh slot that flushes alone.
if len(w.gsoWrites) != 2 {
t.Fatalf("want 2 gso writes got %d", len(w.gsoWrites))
}
if len(w.writes) != 0 {
t.Fatalf("want 0 plain writes got %d", len(w.writes))
}
}
func TestCoalescerRejectsDifferentFlow(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
p1 := buildTCPv4(1000, tcpAck, pay)
p2 := buildTCPv4(2200, tcpAck, pay)
binary.BigEndian.PutUint16(p2[20:22], 9999)
if err := c.Add(p1); err != nil {
t.Fatal(err)
}
if err := c.Add(p2); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// Two independent flows, each flushes its own single-segment WriteGSO.
if len(w.gsoWrites) != 2 || len(w.writes) != 0 {
t.Fatalf("diff flow: want 2 gso writes got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
}
func TestCoalescerRejectsIPOptions(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 500)
pkt := buildTCPv4(1000, tcpAck, pay)
// Bump IHL to 6 to simulate 4 bytes of IP options. Don't actually add
// bytes — parser should bail before it matters.
pkt[0] = 0x46
if err := c.Add(pkt); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// Non-admissible parse → passthrough as plain.
if len(w.writes) != 1 || len(w.gsoWrites) != 0 {
t.Fatalf("IP options should passthrough, got writes=%d gso=%d", len(w.writes), len(w.gsoWrites))
}
}
func TestCoalescerCapBySegments(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 512)
seq := uint32(1000)
for i := 0; i < tcpCoalesceMaxSegs+5; i++ {
if err := c.Add(buildTCPv4(seq, tcpAck, pay)); err != nil {
t.Fatal(err)
}
seq += uint32(len(pay))
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
for _, g := range w.gsoWrites {
segs := len(g.pays)
if segs > tcpCoalesceMaxSegs {
t.Fatalf("super exceeded seg cap: %d > %d", segs, tcpCoalesceMaxSegs)
}
}
}
// TestCoalescerMultipleFlowsInSameBatch proves two interleaved bulk TCP
// flows coalesce independently in a single Flush.
func TestCoalescerMultipleFlowsInSameBatch(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
// Flow A: sport 1000. Flow B: sport 3000.
if err := c.Add(buildTCPv4Ports(1000, 2000, 100, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(3000, 2000, 500, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(1000, 2000, 1300, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(3000, 2000, 1700, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(1000, 2000, 2500, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(3000, 2000, 2900, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
if len(w.gsoWrites) != 2 {
t.Fatalf("want 2 gso writes (one per flow), got %d", len(w.gsoWrites))
}
if len(w.writes) != 0 {
t.Fatalf("want no plain writes, got %d", len(w.writes))
}
// Each superpacket should carry 3 segments.
for i, g := range w.gsoWrites {
if len(g.pays) != 3 {
t.Errorf("gso[%d]: segs=%d want 3", i, len(g.pays))
}
if g.gsoSize != 1200 {
t.Errorf("gso[%d]: gsoSize=%d want 1200", i, g.gsoSize)
}
}
// Verify each superpacket carries the source port it was seeded with.
seenSports := map[uint16]bool{}
for _, g := range w.gsoWrites {
sp := binary.BigEndian.Uint16(g.hdr[20:22])
seenSports[sp] = true
}
if !seenSports[1000] || !seenSports[3000] {
t.Errorf("expected superpackets for sports 1000 and 3000, got %v", seenSports)
}
}
// TestCoalescerPreservesArrivalOrder confirms that with passthrough and
// coalesced events both queued, Flush emits them in Add order rather than
// writing passthrough packets synchronously.
func TestCoalescerPreservesArrivalOrder(t *testing.T) {
w := &orderedFakeWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
// Sequence: coalesceable TCP, ICMP (passthrough), coalesceable TCP on
// a different flow. Expected emit order: gso(X), plain(ICMP), gso(Y).
pay := make([]byte, 1200)
if err := c.Add(buildTCPv4Ports(1000, 2000, 100, tcpAck, pay)); err != nil {
t.Fatal(err)
}
icmp := make([]byte, 28)
icmp[0] = 0x45
binary.BigEndian.PutUint16(icmp[2:4], 28)
icmp[9] = 1
copy(icmp[12:16], []byte{10, 0, 0, 1})
copy(icmp[16:20], []byte{10, 0, 0, 3})
if err := c.Add(icmp); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(3000, 2000, 500, tcpAck, pay)); err != nil {
t.Fatal(err)
}
// Nothing should have hit the writer synchronously.
if len(w.events) != 0 {
t.Fatalf("Add emitted events synchronously: %v", w.events)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
if got, want := w.events, []string{"gso", "plain", "gso"}; !stringSliceEq(got, want) {
t.Fatalf("flush order=%v want %v", got, want)
}
}
// orderedFakeWriter records only the sequence of call types so tests can
// assert arrival order without inspecting bytes.
type orderedFakeWriter struct {
gsoEnabled bool
events []string
}
func (w *orderedFakeWriter) Write(p []byte) (int, error) {
w.events = append(w.events, "plain")
return len(p), nil
}
func (w *orderedFakeWriter) WriteGSO(hdr []byte, pays [][]byte, gsoSize uint16, isV6 bool, csumStart uint16) error {
w.events = append(w.events, "gso")
return nil
}
func (w *orderedFakeWriter) GSOSupported() bool { return w.gsoEnabled }
func stringSliceEq(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
// TestCoalescerInterleavedFlowsPreserveOrdering checks that a non-admissible
// packet (SYN) mid-flow only flushes its own flow, not others.
func TestCoalescerInterleavedFlowsPreserveOrdering(t *testing.T) {
w := &fakeTunWriter{gsoEnabled: true}
c := NewTCPCoalescer(w)
pay := make([]byte, 1200)
// Flow A two segments.
if err := c.Add(buildTCPv4Ports(1000, 2000, 100, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(1000, 2000, 1300, tcpAck, pay)); err != nil {
t.Fatal(err)
}
// Flow B two segments.
if err := c.Add(buildTCPv4Ports(3000, 2000, 500, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Add(buildTCPv4Ports(3000, 2000, 1700, tcpAck, pay)); err != nil {
t.Fatal(err)
}
// Flow A SYN (non-admissible) — must flush only flow A's slot.
syn := buildTCPv4Ports(1000, 2000, 9999, tcpSyn|tcpAck, pay)
if err := c.Add(syn); err != nil {
t.Fatal(err)
}
// Flow B continues — should still be coalesced with its seed.
if err := c.Add(buildTCPv4Ports(3000, 2000, 2900, tcpAck, pay)); err != nil {
t.Fatal(err)
}
if err := c.Flush(); err != nil {
t.Fatal(err)
}
// Expected:
// - 1 gso for flow A (first 2 segments)
// - 1 plain for flow A SYN
// - 1 gso for flow B (3 segments)
if len(w.gsoWrites) != 2 {
t.Fatalf("want 2 gso writes, got %d", len(w.gsoWrites))
}
if len(w.writes) != 1 {
t.Fatalf("want 1 plain write (SYN), got %d", len(w.writes))
}
// Find the 3-segment gso (flow B) and the 2-segment gso (flow A).
var segCounts []int
for _, g := range w.gsoWrites {
segCounts = append(segCounts, len(g.pays))
}
if !(segCounts[0] == 2 && segCounts[1] == 3) && !(segCounts[0] == 3 && segCounts[1] == 2) {
t.Errorf("unexpected segment counts: %v (want 2 and 3)", segCounts)
}
}

View File

@@ -0,0 +1,70 @@
package tio
import (
"encoding/binary"
"errors"
"fmt"
"golang.org/x/sys/unix"
)
type offloadContainer struct {
pq []*Offload
// pqi is exactly the same as pq, but stored as the interface type
pqi []Queue
shutdownFd int
}
func NewOffloadContainer() (Container, error) {
shutdownFd, err := unix.Eventfd(0, unix.EFD_NONBLOCK|unix.EFD_CLOEXEC)
if err != nil {
return nil, fmt.Errorf("failed to create eventfd: %w", err)
}
out := &offloadContainer{
pq: []*Offload{},
pqi: []Queue{},
shutdownFd: shutdownFd,
}
return out, nil
}
func (c *offloadContainer) Queues() []Queue {
return c.pqi
}
func (c *offloadContainer) Add(fd int) error {
x, err := newOffload(fd, c.shutdownFd)
if err != nil {
return err
}
c.pq = append(c.pq, x)
c.pqi = append(c.pqi, x)
return nil
}
func (c *offloadContainer) wakeForShutdown() error {
var buf [8]byte
binary.NativeEndian.PutUint64(buf[:], 1)
_, err := unix.Write(c.shutdownFd, buf[:])
return err
}
func (c *offloadContainer) Close() error {
errs := []error{}
// Signal all readers blocked in poll to wake up and exit
if err := c.wakeForShutdown(); err != nil {
errs = append(errs, err)
}
for _, x := range c.pq {
if err := x.Close(); err != nil {
errs = append(errs, err)
}
}
return errors.Join(errs...)
}
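// Illustrative lifecycle sketch (assumption, not part of this change): tunFd
// is a hypothetical already-open TUN file descriptor and serve a hypothetical
// per-queue goroutine body.
//
//	c, err := NewOffloadContainer()
//	if err != nil {
//		return err
//	}
//	if err := c.Add(tunFd); err != nil {
//		return err
//	}
//	for _, q := range c.Queues() {
//		go serve(q) // one goroutine drives each Queue
//	}
//	// ...
//	_ = c.Close() // writes the eventfd so readers blocked in poll wake up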


@@ -13,6 +13,8 @@ type Container interface {
// Add takes a tun fd, adds it to the container, and prepares it for use as a Queue
Add(fd int) error
io.Closer
}
// Queue is a readable/writable Poll queue. One Queue is driven by a single


@@ -0,0 +1,374 @@
package tio
import (
"fmt"
"io"
"os"
"sync/atomic"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
// Space for segmented output. Worst case is many small segments, each paying
// the overhead of a replicated IP+TCP header. Should be a multiple of 64KiB.
// const tunSegBufSize = 0xffff * 8 TODO larger? config?
const tunSegBufSize = 131072
// tunSegBufCap is the total size we allocate for the per-reader segment
// buffer. It is sized as one worst-case TSO superpacket (tunSegBufSize) plus
// the same again as drain headroom so a Read wake can accumulate
// additional packets after an initial big read without overflowing.
const tunSegBufCap = tunSegBufSize * 2
// tunDrainCap caps how many packets a single Read will accumulate via
// the post-wake drain loop. Sized to soak up a burst of small ACKs while
// bounding how much work a single caller holds before handing off.
const tunDrainCap = 64 //256
// gsoInitialPayIovs is the starting capacity (in payload fragments) of
// Offload.gsoIovs. Sized to cover the default coalesce segment cap without
// any reallocations.
const gsoInitialPayIovs = 66
// validVnetHdr is the 10-byte virtio_net_hdr we prepend to every non-GSO TUN
// write. Only flag set is VIRTIO_NET_HDR_F_DATA_VALID, which marks the skb
// CHECKSUM_UNNECESSARY so the receiving network stack skips L4 checksum
// verification. All packets that reach the plain Write / WriteReject paths
// already carry a valid L4 checksum (either supplied by a remote peer whose
// ciphertext we AEAD-authenticated, or produced by finishChecksum during TSO
// segmentation, or built locally by CreateRejectPacket), so trusting them is
// safe.
var validVnetHdr = [virtioNetHdrLen]byte{unix.VIRTIO_NET_HDR_F_DATA_VALID}
// Offload wraps a TUN file descriptor with poll-based reads. The FD provided will be changed to non-blocking.
// A shared eventfd allows Close to wake all readers blocked in poll.
type Offload struct {
fd int
shutdownFd int
readPoll [2]unix.PollFd
writePoll [2]unix.PollFd
closed atomic.Bool
readBuf []byte // scratch for a single raw read (virtio hdr + superpacket)
segBuf []byte // backing store for segmented output
segOff int // cursor into segBuf for the current Read drain
pending [][]byte // segments returned from the most recent Read
writeIovs [2]unix.Iovec // preallocated iovecs for Write (coalescer passthrough); iovs[0] is fixed to validVnetHdr
// rejectIovs is a second preallocated iovec scratch used exclusively by
// WriteReject (reject + self-forward from the inside path). It mirrors
// writeIovs but lets listenIn goroutines emit reject packets without
// racing with the listenOut coalescer that owns writeIovs.
rejectIovs [2]unix.Iovec
// gsoHdrBuf is a per-queue 10-byte scratch for the virtio_net_hdr emitted
// by WriteGSO. Separate from validVnetHdr so a concurrent non-GSO Write on
// another queue never observes a half-written header.
gsoHdrBuf [virtioNetHdrLen]byte
// gsoIovs is the writev iovec scratch for WriteGSO. Sized to hold the
// virtio header + IP/TCP header + up to gsoInitialPayIovs payload
// fragments; grown on demand if a coalescer pushes more.
gsoIovs []unix.Iovec
}
func newOffload(fd int, shutdownFd int) (*Offload, error) {
if err := unix.SetNonblock(fd, true); err != nil {
return nil, fmt.Errorf("failed to set tun fd non-blocking: %w", err)
}
out := &Offload{
fd: fd,
shutdownFd: shutdownFd,
closed: atomic.Bool{},
readBuf: make([]byte, tunReadBufSize),
readPoll: [2]unix.PollFd{
{Fd: int32(fd), Events: unix.POLLIN},
{Fd: int32(shutdownFd), Events: unix.POLLIN},
},
writePoll: [2]unix.PollFd{
{Fd: int32(fd), Events: unix.POLLOUT},
{Fd: int32(shutdownFd), Events: unix.POLLIN},
},
segBuf: make([]byte, tunSegBufCap),
gsoIovs: make([]unix.Iovec, 2, 2+gsoInitialPayIovs),
}
out.writeIovs[0].Base = &validVnetHdr[0]
out.writeIovs[0].SetLen(virtioNetHdrLen)
out.rejectIovs[0].Base = &validVnetHdr[0]
out.rejectIovs[0].SetLen(virtioNetHdrLen)
out.gsoIovs[0].Base = &out.gsoHdrBuf[0]
out.gsoIovs[0].SetLen(virtioNetHdrLen)
return out, nil
}
func (r *Offload) blockOnRead() error {
const problemFlags = unix.POLLHUP | unix.POLLNVAL | unix.POLLERR
var err error
for {
_, err = unix.Poll(r.readPoll[:], -1)
if err != unix.EINTR {
break
}
}
//always reset these!
tunEvents := r.readPoll[0].Revents
shutdownEvents := r.readPoll[1].Revents
r.readPoll[0].Revents = 0
r.readPoll[1].Revents = 0
//do the err check before trusting the potentially bogus bits we just got
if err != nil {
return err
}
if shutdownEvents&(unix.POLLIN|problemFlags) != 0 {
return os.ErrClosed
} else if tunEvents&problemFlags != 0 {
return os.ErrClosed
}
return nil
}
func (r *Offload) blockOnWrite() error {
const problemFlags = unix.POLLHUP | unix.POLLNVAL | unix.POLLERR
var err error
for {
_, err = unix.Poll(r.writePoll[:], -1)
if err != unix.EINTR {
break
}
}
//always reset these!
tunEvents := r.writePoll[0].Revents
shutdownEvents := r.writePoll[1].Revents
r.writePoll[0].Revents = 0
r.writePoll[1].Revents = 0
//do the err check before trusting the potentially bogus bits we just got
if err != nil {
return err
}
if shutdownEvents&(unix.POLLIN|problemFlags) != 0 {
return os.ErrClosed
} else if tunEvents&problemFlags != 0 {
return os.ErrClosed
}
return nil
}
func (r *Offload) readRaw(buf []byte) (int, error) {
for {
if n, err := unix.Read(r.fd, buf); err == nil {
return n, nil
} else if err == unix.EAGAIN {
if err = r.blockOnRead(); err != nil {
return 0, err
}
continue
} else if err == unix.EINTR {
continue
} else if err == unix.EBADF {
return 0, os.ErrClosed
} else {
return 0, err
}
}
}
// Read reads one or more superpackets from the tun and returns the
// resulting packets. The first read blocks via poll; once the fd is known
// readable we drain additional packets non-blocking until the kernel queue
// is empty (EAGAIN), we've collected tunDrainCap packets, or we're out of
// segBuf headroom. This amortizes the poll wake over bursts of small
// packets (e.g. TCP ACKs). Slices point into the Offload's internal buffers
// and are only valid until the next Read or Close on this Queue.
func (r *Offload) Read() ([][]byte, error) {
r.pending = r.pending[:0]
r.segOff = 0
// Initial (blocking) read. Retry on decode errors so a single bad
// packet does not stall the reader.
for {
n, err := r.readRaw(r.readBuf)
if err != nil {
return nil, err
}
if err := r.decodeRead(n); err != nil {
// Drop and read again — a bad packet should not kill the reader.
continue
}
break
}
// Drain: non-blocking reads until the kernel queue is empty, the drain
// cap is reached, or segBuf no longer has room for another worst-case
// superpacket.
for len(r.pending) < tunDrainCap && tunSegBufCap-r.segOff >= tunSegBufSize {
n, err := unix.Read(r.fd, r.readBuf)
if err != nil {
// EAGAIN / EINTR / anything else: stop draining. We already
// have a valid batch from the first read.
break
}
if n <= 0 {
break
}
if err := r.decodeRead(n); err != nil {
// Drop this packet and stop the drain; we'd rather hand off
// what we have than keep spinning here.
break
}
}
return r.pending, nil
}
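// Illustrative reader-loop sketch (assumption): the returned slices alias
// r.readBuf / r.segBuf, so the caller must finish with (or copy) them before
// the next Read. handle is a hypothetical per-packet callback.
//
//	for {
//		pkts, err := q.Read()
//		if err != nil {
//			return err // os.ErrClosed once the container shuts down
//		}
//		for _, p := range pkts {
//			handle(p) // must not retain p past the next Read
//		}
//	}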
// decodeRead decodes the virtio header plus payload in r.readBuf[:n], appends
// the segments to r.pending, and advances r.segOff by the total scratch used.
// The fd is assumed to have been opened with IFF_VNET_HDR, so a virtio header
// is always present at the front of the read.
func (r *Offload) decodeRead(n int) error {
if n < virtioNetHdrLen {
return fmt.Errorf("short tun read: %d < %d", n, virtioNetHdrLen)
}
var hdr VirtioNetHdr
hdr.decode(r.readBuf[:virtioNetHdrLen])
before := len(r.pending)
if err := segmentInto(r.readBuf[virtioNetHdrLen:n], hdr, &r.pending, r.segBuf[r.segOff:]); err != nil {
return err
}
for k := before; k < len(r.pending); k++ {
r.segOff += len(r.pending[k])
}
return nil
}
func (r *Offload) Write(buf []byte) (int, error) {
return r.writeWithScratch(buf, &r.writeIovs)
}
// WriteReject emits a packet using a dedicated iovec scratch (rejectIovs)
// distinct from the one used by the coalescer's Write path. This avoids a
// data race between the inside (listenIn) goroutine emitting reject or
// self-forward packets and the outside (listenOut) goroutine flushing TCP
// coalescer passthroughs on the same Offload.
func (r *Offload) WriteReject(buf []byte) (int, error) {
return r.writeWithScratch(buf, &r.rejectIovs)
}
func (r *Offload) writeWithScratch(buf []byte, iovs *[2]unix.Iovec) (int, error) {
if len(buf) == 0 {
return 0, nil
}
// Point the payload iovec at the caller's buffer. iovs[0] is pre-wired
// to validVnetHdr during Offload construction so we don't rebuild it here.
iovs[1].Base = &buf[0]
iovs[1].SetLen(len(buf))
iovPtr := unsafe.Pointer(&iovs[0])
return r.rawWrite(iovPtr, 2)
}
func (r *Offload) rawWrite(iovs unsafe.Pointer, iovcnt int) (int, error) {
for {
n, _, errno := syscall.Syscall(unix.SYS_WRITEV, uintptr(r.fd), uintptr(iovs), uintptr(iovcnt))
if errno == 0 {
if int(n) < virtioNetHdrLen {
return 0, io.ErrShortWrite
}
return int(n) - virtioNetHdrLen, nil
}
if errno == unix.EAGAIN {
if err := r.blockOnWrite(); err != nil {
return 0, err
}
continue
}
if errno == unix.EINTR {
continue
}
if errno == unix.EBADF {
return 0, os.ErrClosed
}
return 0, errno
}
}
// GSOSupported reports whether this queue was opened with IFF_VNET_HDR and
// can accept WriteGSO. When false, callers should fall back to per-segment
// Write calls.
func (r *Offload) GSOSupported() bool { return true }
// WriteGSO emits a TCP TSO superpacket in a single writev. hdr is the
// IPv4/IPv6 + TCP header prefix (already finalized — total length, IP csum,
// and TCP pseudo-header partial set by the caller). pays are payload
// fragments whose concatenation forms the full coalesced payload; each
// slice is read-only and must stay valid until return. gsoSize is the MSS;
// every segment except possibly the last is exactly gsoSize bytes.
// csumStart is the byte offset where the TCP header begins within hdr.
func (r *Offload) WriteGSO(hdr []byte, pays [][]byte, gsoSize uint16, isV6 bool, csumStart uint16) error {
if len(hdr) == 0 || len(pays) == 0 {
return nil
}
// Build the virtio_net_hdr. When the payloads sum to gsoSize or less the
// kernel would produce a single segment; keep NEEDS_CSUM semantics but skip
// the GSO type so the kernel doesn't spuriously mark this as TSO.
vhdr := VirtioNetHdr{
Flags: unix.VIRTIO_NET_HDR_F_NEEDS_CSUM,
HdrLen: uint16(len(hdr)),
GSOSize: gsoSize,
CsumStart: csumStart,
CsumOffset: 16, // TCP checksum field lives 16 bytes into the TCP header
}
var totalPay int
for _, p := range pays {
totalPay += len(p)
}
if totalPay > int(gsoSize) {
if isV6 {
vhdr.GSOType = unix.VIRTIO_NET_HDR_GSO_TCPV6
} else {
vhdr.GSOType = unix.VIRTIO_NET_HDR_GSO_TCPV4
}
} else {
vhdr.GSOType = unix.VIRTIO_NET_HDR_GSO_NONE
vhdr.GSOSize = 0
}
vhdr.encode(r.gsoHdrBuf[:])
// Build the iovec array: [virtio_hdr, hdr, pays...]. r.gsoIovs[0] is
// wired to gsoHdrBuf at construction and never changes.
need := 2 + len(pays)
if cap(r.gsoIovs) < need {
grown := make([]unix.Iovec, need)
grown[0] = r.gsoIovs[0]
r.gsoIovs = grown
} else {
r.gsoIovs = r.gsoIovs[:need]
}
r.gsoIovs[1].Base = &hdr[0]
r.gsoIovs[1].SetLen(len(hdr))
for i, p := range pays {
r.gsoIovs[2+i].Base = &p[0]
r.gsoIovs[2+i].SetLen(len(p))
}
iovPtr := unsafe.Pointer(&r.gsoIovs[0])
iovCnt := len(r.gsoIovs)
_, err := r.rawWrite(iovPtr, iovCnt)
return err
}
func (r *Offload) Close() error {
if r.closed.Swap(true) {
return nil
}
//shutdownFd is owned by the container, so we should not close it
var err error
if r.fd >= 0 {
err = unix.Close(r.fd)
r.fd = -1
}
return err
}


@@ -0,0 +1,239 @@
//go:build linux && !android && !e2e_testing
// +build linux,!android,!e2e_testing
package tio
import (
"encoding/binary"
"fmt"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/tcpip/checksum"
)
// segmentInto splits a TUN-side packet described by hdr into one or more
// IP packets, each appended to *out as a slice of scratch. scratch must be
// sized to hold every segment (including replicated headers).
func segmentInto(pkt []byte, hdr VirtioNetHdr, out *[][]byte, scratch []byte) error {
// When RSC_INFO is set the csum_start/csum_offset fields are repurposed to
// carry coalescing info rather than checksum offsets. A TUN writing via
// IFF_VNET_HDR should never emit this, but if it did we would silently
// miscompute the segment checksums — refuse the packet instead.
if hdr.Flags&unix.VIRTIO_NET_HDR_F_RSC_INFO != 0 {
return fmt.Errorf("virtio RSC_INFO flag not supported on TUN reads")
}
switch hdr.GSOType {
case unix.VIRTIO_NET_HDR_GSO_NONE:
if len(pkt) > len(scratch) {
return fmt.Errorf("packet larger than segment buffer: %d > %d", len(pkt), len(scratch))
}
copy(scratch, pkt)
seg := scratch[:len(pkt)]
if hdr.Flags&unix.VIRTIO_NET_HDR_F_NEEDS_CSUM != 0 {
if err := finishChecksum(seg, hdr); err != nil {
return err
}
}
*out = append(*out, seg)
return nil
case unix.VIRTIO_NET_HDR_GSO_TCPV4, unix.VIRTIO_NET_HDR_GSO_TCPV6:
return segmentTCP(pkt, hdr, out, scratch)
default:
return fmt.Errorf("unsupported virtio gso type: %d", hdr.GSOType)
}
}
// finishChecksum computes the L4 checksum for a non-GSO packet that the kernel
// handed us with NEEDS_CSUM set. csum_start / csum_offset point at the 16-bit
// checksum field; we zero it, fold a full sum (the field was pre-loaded with
// the pseudo-header partial sum by the kernel), and store the result.
func finishChecksum(seg []byte, hdr VirtioNetHdr) error {
cs := int(hdr.CsumStart)
co := int(hdr.CsumOffset)
if cs+co+2 > len(seg) {
return fmt.Errorf("csum offsets out of range: start=%d offset=%d len=%d", cs, co, len(seg))
}
// The kernel stores a partial pseudo-header sum at [cs+co:]; sum over the
// L4 region starting at cs, folding the prior partial in as the seed.
partial := binary.BigEndian.Uint16(seg[cs+co : cs+co+2])
seg[cs+co] = 0
seg[cs+co+1] = 0
binary.BigEndian.PutUint16(seg[cs+co:cs+co+2], ^checksum.Checksum(seg[cs:], partial))
return nil
}
// segmentTCP software-segments a TSO superpacket into one IP packet per MSS
// chunk. The caller guarantees hdr.GSOType is TCPV4 or TCPV6.
//
// Hot-path shape: the per-segment loop only sums the payload chunk. The TCP
// header, the IPv4 header, and the pseudo-header src/dst/proto contributions
// are each summed once up front — every segment reuses those three pre-folded
// uint32 values and combines them with small per-segment deltas (seq, flags,
// tcpLen, ip_id, total_len) that are cheap to fold in.
func segmentTCP(pkt []byte, hdr VirtioNetHdr, out *[][]byte, scratch []byte) error {
if hdr.GSOSize == 0 {
return fmt.Errorf("gso_size is zero")
}
if int(hdr.HdrLen) > len(pkt) || hdr.HdrLen == 0 {
return fmt.Errorf("hdr_len %d out of range (pkt %d)", hdr.HdrLen, len(pkt))
}
if hdr.CsumStart == 0 || hdr.CsumStart >= hdr.HdrLen {
return fmt.Errorf("csum_start %d out of range (hdr_len %d)", hdr.CsumStart, hdr.HdrLen)
}
isV4 := hdr.GSOType == unix.VIRTIO_NET_HDR_GSO_TCPV4
headerLen := int(hdr.HdrLen)
csumStart := int(hdr.CsumStart)
if isV4 && csumStart < 20 {
return fmt.Errorf("csum_start %d too small for IPv4", csumStart)
}
if !isV4 && csumStart < 40 {
return fmt.Errorf("csum_start %d too small for IPv6", csumStart)
}
tcpHdrLen := headerLen - csumStart
if tcpHdrLen < 20 {
return fmt.Errorf("tcp header region too small: %d", tcpHdrLen)
}
payload := pkt[headerLen:]
payLen := len(payload)
gso := int(hdr.GSOSize)
numSeg := (payLen + gso - 1) / gso
if numSeg == 0 {
numSeg = 1
}
need := numSeg*headerLen + payLen
if need > len(scratch) {
return fmt.Errorf("scratch too small for %d segments: need %d have %d", numSeg, need, len(scratch))
}
origSeq := binary.BigEndian.Uint32(pkt[csumStart+4 : csumStart+8])
origFlags := pkt[csumStart+13]
const tcpFinPsh = 0x09 // FIN(0x01) | PSH(0x08)
// Precompute the TCP header sum with seq/flags/csum zeroed. The max TCP
// header is 60 bytes; copy onto the stack, zero the per-segment-varying
// fields, sum once.
var tmp [60]byte
copy(tmp[:tcpHdrLen], pkt[csumStart:headerLen])
tmp[4], tmp[5], tmp[6], tmp[7] = 0, 0, 0, 0 // seq
tmp[13] = 0 // flags
tmp[16], tmp[17] = 0, 0 // csum
baseTcpHdrSum := uint32(checksum.Checksum(tmp[:tcpHdrLen], 0))
// Pseudo-header src+dst+proto contribution (tcpLen varies per segment).
var baseProtoSum uint32
if isV4 {
baseProtoSum = uint32(checksum.Checksum(pkt[12:20], 0))
} else {
baseProtoSum = uint32(checksum.Checksum(pkt[8:40], 0))
}
baseProtoSum += uint32(unix.IPPROTO_TCP)
// Precompute IPv4 header sum with total_len/id/csum zeroed.
var origIPID uint16
var ihl int
var baseIPHdrSum uint32
if isV4 {
origIPID = binary.BigEndian.Uint16(pkt[4:6])
ihl = int(pkt[0]&0x0f) * 4
if ihl < 20 || ihl > csumStart {
return fmt.Errorf("bad IPv4 IHL: %d", ihl)
}
var ipTmp [60]byte
copy(ipTmp[:ihl], pkt[:ihl])
ipTmp[2], ipTmp[3] = 0, 0 // total_len
ipTmp[4], ipTmp[5] = 0, 0 // id
ipTmp[10], ipTmp[11] = 0, 0 // checksum
baseIPHdrSum = uint32(checksum.Checksum(ipTmp[:ihl], 0))
}
off := 0
for i := 0; i < numSeg; i++ {
segStart := i * gso
segEnd := segStart + gso
if segEnd > payLen {
segEnd = payLen
}
segPayLen := segEnd - segStart
copy(scratch[off:], pkt[:headerLen])
copy(scratch[off+headerLen:], payload[segStart:segEnd])
seg := scratch[off : off+headerLen+segPayLen]
off += headerLen + segPayLen
segSeq := origSeq + uint32(segStart)
segFlags := origFlags
if i != numSeg-1 {
segFlags = origFlags &^ tcpFinPsh
}
totalLen := headerLen + segPayLen
// Patch IP header and write the v4 header checksum from the precomputed base.
if isV4 {
segID := origIPID + uint16(i)
binary.BigEndian.PutUint16(seg[2:4], uint16(totalLen))
binary.BigEndian.PutUint16(seg[4:6], segID)
ipSum := baseIPHdrSum + uint32(totalLen) + uint32(segID)
binary.BigEndian.PutUint16(seg[10:12], foldComplement(ipSum))
} else {
// IPv6 payload length excludes the 40-byte fixed header but
// includes any extension headers between [40:csumStart].
binary.BigEndian.PutUint16(seg[4:6], uint16(headerLen-40+segPayLen))
}
// Patch TCP header.
binary.BigEndian.PutUint32(seg[csumStart+4:csumStart+8], segSeq)
seg[csumStart+13] = segFlags
// (csum is written below; its prior contents in `seg` don't affect the
// computation since we never sum over the segment's own header.)
tcpLen := tcpHdrLen + segPayLen
paySum := uint32(checksum.Checksum(payload[segStart:segEnd], 0))
// Combine pre-folded uint32s into a wider accumulator, then fold. Using
// uint64 guards against overflow when segSeq's high bits are set.
wide := uint64(baseTcpHdrSum) + uint64(paySum) + uint64(baseProtoSum)
wide += uint64(segSeq) + uint64(segFlags) + uint64(tcpLen)
wide = (wide & 0xffffffff) + (wide >> 32)
wide = (wide & 0xffffffff) + (wide >> 32)
binary.BigEndian.PutUint16(seg[csumStart+16:csumStart+18], foldComplement(uint32(wide)))
*out = append(*out, seg)
}
return nil
}
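// Worked example of the delta recombination above (illustrative): with
// baseTcpHdrSum, baseProtoSum and paySum already computed, a segment with
// segSeq=0x00010000, segFlags=0x10 and tcpLen=120 adds
// 0x00010000 + 0x10 + 0x78 into the 64-bit accumulator. Adding segSeq as a
// single 32-bit value works because, under one's-complement folding,
// 0x00010000 contributes the same as its two 16-bit header words
// (0x0001 + 0x0000).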
// foldComplement folds a 32-bit one's-complement partial sum to 16 bits and
// complements it, yielding the on-wire Internet checksum value.
func foldComplement(sum uint32) uint16 {
sum = (sum & 0xffff) + (sum >> 16)
sum = (sum & 0xffff) + (sum >> 16)
return ^uint16(sum)
}
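// For example (illustrative): foldComplement(0x2fffe) first folds to
// 0xfffe + 0x2 = 0x10000, folds again to 0x0001, and returns ^0x0001 = 0xfffe.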
// pseudoHeaderIPv4 returns the folded pseudo-header sum used to verify a TCP
// segment's checksum in tests. src/dst are 4 bytes each.
func pseudoHeaderIPv4(src, dst []byte, proto byte, tcpLen int) uint16 {
s := uint32(checksum.Checksum(src, 0)) + uint32(checksum.Checksum(dst, 0))
s += uint32(proto) + uint32(tcpLen)
s = (s & 0xffff) + (s >> 16)
s = (s & 0xffff) + (s >> 16)
return uint16(s)
}
// pseudoHeaderIPv6 returns the folded pseudo-header sum used to verify a TCP
// segment's checksum in tests. src/dst are 16 bytes each.
func pseudoHeaderIPv6(src, dst []byte, proto byte, tcpLen int) uint16 {
s := uint32(checksum.Checksum(src, 0)) + uint32(checksum.Checksum(dst, 0))
s += uint32(tcpLen>>16) + uint32(tcpLen&0xffff) + uint32(proto)
s = (s & 0xffff) + (s >> 16)
s = (s & 0xffff) + (s >> 16)
return uint16(s)
}


@@ -0,0 +1,330 @@
//go:build linux && !android && !e2e_testing
// +build linux,!android,!e2e_testing
package tio
import (
"encoding/binary"
"os"
"testing"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/tcpip/checksum"
)
// verifyChecksum confirms that the one's-complement sum across `b`, seeded
// with a folded pseudo-header sum, equals all-ones (valid).
func verifyChecksum(b []byte, pseudo uint16) bool {
return checksum.Checksum(b, pseudo) == 0xffff
}
// buildTSOv4 builds a synthetic IPv4/TCP TSO superpacket with a payload of
// `payLen` bytes split at `mss`.
func buildTSOv4(t *testing.T, payLen, mss int) ([]byte, VirtioNetHdr) {
t.Helper()
const ipLen = 20
const tcpLen = 20
pkt := make([]byte, ipLen+tcpLen+payLen)
// IPv4 header
pkt[0] = 0x45 // version 4, IHL 5
// total length is meaningless for TSO but set it anyway
binary.BigEndian.PutUint16(pkt[2:4], uint16(ipLen+tcpLen+payLen))
binary.BigEndian.PutUint16(pkt[4:6], 0x4242) // original ID
pkt[8] = 64 // TTL
pkt[9] = unix.IPPROTO_TCP
copy(pkt[12:16], []byte{10, 0, 0, 1}) // src
copy(pkt[16:20], []byte{10, 0, 0, 2}) // dst
// TCP header
binary.BigEndian.PutUint16(pkt[20:22], 12345) // sport
binary.BigEndian.PutUint16(pkt[22:24], 80) // dport
binary.BigEndian.PutUint32(pkt[24:28], 10000) // seq
binary.BigEndian.PutUint32(pkt[28:32], 20000) // ack
pkt[32] = 0x50 // data offset 5 words
pkt[33] = 0x18 // ACK | PSH
binary.BigEndian.PutUint16(pkt[34:36], 65535) // window
// payload
for i := 0; i < payLen; i++ {
pkt[ipLen+tcpLen+i] = byte(i & 0xff)
}
return pkt, VirtioNetHdr{
Flags: unix.VIRTIO_NET_HDR_F_NEEDS_CSUM,
GSOType: unix.VIRTIO_NET_HDR_GSO_TCPV4,
HdrLen: uint16(ipLen + tcpLen),
GSOSize: uint16(mss),
CsumStart: uint16(ipLen),
CsumOffset: 16,
}
}
func TestSegmentTCPv4(t *testing.T) {
const mss = 100
const numSeg = 3
pkt, hdr := buildTSOv4(t, mss*numSeg, mss)
scratch := make([]byte, tunSegBufSize)
var out [][]byte
if err := segmentTCP(pkt, hdr, &out, scratch); err != nil {
t.Fatalf("segmentTCP: %v", err)
}
if len(out) != numSeg {
t.Fatalf("expected %d segments, got %d", numSeg, len(out))
}
for i, seg := range out {
if len(seg) != 40+mss {
t.Errorf("seg %d: unexpected len %d", i, len(seg))
}
totalLen := binary.BigEndian.Uint16(seg[2:4])
if totalLen != uint16(40+mss) {
t.Errorf("seg %d: total_len=%d want %d", i, totalLen, 40+mss)
}
id := binary.BigEndian.Uint16(seg[4:6])
if id != 0x4242+uint16(i) {
t.Errorf("seg %d: ip id=%#x want %#x", i, id, 0x4242+uint16(i))
}
seq := binary.BigEndian.Uint32(seg[24:28])
wantSeq := uint32(10000 + i*mss)
if seq != wantSeq {
t.Errorf("seg %d: seq=%d want %d", i, seq, wantSeq)
}
flags := seg[33]
wantFlags := byte(0x10) // ACK only, PSH cleared
if i == numSeg-1 {
wantFlags = 0x18 // ACK | PSH preserved on last
}
if flags != wantFlags {
t.Errorf("seg %d: flags=%#x want %#x", i, flags, wantFlags)
}
// IPv4 header checksum must verify against itself.
if !verifyChecksum(seg[:20], 0) {
t.Errorf("seg %d: bad IPv4 header checksum", i)
}
// TCP checksum must verify against the pseudo-header.
psum := pseudoHeaderIPv4(seg[12:16], seg[16:20], unix.IPPROTO_TCP, 20+mss)
if !verifyChecksum(seg[20:], psum) {
t.Errorf("seg %d: bad TCP checksum", i)
}
}
}
func TestSegmentTCPv4OddTail(t *testing.T) {
// Payload of 250 bytes with MSS 100 → segments of 100, 100, 50.
pkt, hdr := buildTSOv4(t, 250, 100)
scratch := make([]byte, tunSegBufSize)
var out [][]byte
if err := segmentTCP(pkt, hdr, &out, scratch); err != nil {
t.Fatalf("segmentTCP: %v", err)
}
if len(out) != 3 {
t.Fatalf("want 3 segments, got %d", len(out))
}
wantPayLens := []int{100, 100, 50}
for i, seg := range out {
if len(seg)-40 != wantPayLens[i] {
t.Errorf("seg %d: pay len %d want %d", i, len(seg)-40, wantPayLens[i])
}
if !verifyChecksum(seg[:20], 0) {
t.Errorf("seg %d: bad IPv4 header checksum", i)
}
psum := pseudoHeaderIPv4(seg[12:16], seg[16:20], unix.IPPROTO_TCP, 20+wantPayLens[i])
if !verifyChecksum(seg[20:], psum) {
t.Errorf("seg %d: bad TCP checksum", i)
}
}
}
func TestSegmentTCPv6(t *testing.T) {
const ipLen = 40
const tcpLen = 20
const mss = 120
const numSeg = 2
payLen := mss * numSeg
pkt := make([]byte, ipLen+tcpLen+payLen)
// IPv6 header
pkt[0] = 0x60 // version 6
binary.BigEndian.PutUint16(pkt[4:6], uint16(tcpLen+payLen))
pkt[6] = unix.IPPROTO_TCP
pkt[7] = 64
// src/dst fe80::1 / fe80::2
pkt[8] = 0xfe
pkt[9] = 0x80
pkt[23] = 1
pkt[24] = 0xfe
pkt[25] = 0x80
pkt[39] = 2
// TCP header
binary.BigEndian.PutUint16(pkt[40:42], 12345)
binary.BigEndian.PutUint16(pkt[42:44], 80)
binary.BigEndian.PutUint32(pkt[44:48], 7)
binary.BigEndian.PutUint32(pkt[48:52], 99)
pkt[52] = 0x50
pkt[53] = 0x19 // FIN | ACK | PSH — exercise FIN clearing too
binary.BigEndian.PutUint16(pkt[54:56], 65535)
for i := 0; i < payLen; i++ {
pkt[ipLen+tcpLen+i] = byte(i)
}
hdr := VirtioNetHdr{
Flags: unix.VIRTIO_NET_HDR_F_NEEDS_CSUM,
GSOType: unix.VIRTIO_NET_HDR_GSO_TCPV6,
HdrLen: uint16(ipLen + tcpLen),
GSOSize: uint16(mss),
CsumStart: uint16(ipLen),
CsumOffset: 16,
}
scratch := make([]byte, tunSegBufSize)
var out [][]byte
if err := segmentTCP(pkt, hdr, &out, scratch); err != nil {
t.Fatalf("segmentTCP: %v", err)
}
if len(out) != numSeg {
t.Fatalf("want %d segments, got %d", numSeg, len(out))
}
for i, seg := range out {
if len(seg) != ipLen+tcpLen+mss {
t.Errorf("seg %d: len %d want %d", i, len(seg), ipLen+tcpLen+mss)
}
pl := binary.BigEndian.Uint16(seg[4:6])
if pl != uint16(tcpLen+mss) {
t.Errorf("seg %d: payload_length=%d want %d", i, pl, tcpLen+mss)
}
seq := binary.BigEndian.Uint32(seg[44:48])
if seq != uint32(7+i*mss) {
t.Errorf("seg %d: seq=%d want %d", i, seq, 7+i*mss)
}
flags := seg[53]
// Original flags = 0x19 (FIN|ACK|PSH). FIN(0x01)+PSH(0x08) should be
// cleared on all but the last; ACK(0x10) always preserved.
wantFlags := byte(0x10)
if i == numSeg-1 {
wantFlags = 0x19
}
if flags != wantFlags {
t.Errorf("seg %d: flags=%#x want %#x", i, flags, wantFlags)
}
psum := pseudoHeaderIPv6(seg[8:24], seg[24:40], unix.IPPROTO_TCP, tcpLen+mss)
if !verifyChecksum(seg[ipLen:], psum) {
t.Errorf("seg %d: bad TCP checksum", i)
}
}
}
func TestSegmentGSONonePassesThrough(t *testing.T) {
pkt, hdr := buildTSOv4(t, 100, 100)
hdr.GSOType = unix.VIRTIO_NET_HDR_GSO_NONE
hdr.Flags = 0 // no NEEDS_CSUM, leave packet untouched
scratch := make([]byte, tunSegBufSize)
var out [][]byte
if err := segmentInto(pkt, hdr, &out, scratch); err != nil {
t.Fatalf("segmentInto: %v", err)
}
if len(out) != 1 {
t.Fatalf("want 1 segment, got %d", len(out))
}
if len(out[0]) != len(pkt) {
t.Fatalf("unexpected length: %d vs %d", len(out[0]), len(pkt))
}
}
func TestSegmentRejectsUDP(t *testing.T) {
hdr := VirtioNetHdr{GSOType: unix.VIRTIO_NET_HDR_GSO_UDP}
var out [][]byte
if err := segmentInto(nil, hdr, &out, nil); err == nil {
t.Fatalf("expected rejection for UDP GSO")
}
}
func BenchmarkSegmentTCPv4(b *testing.B) {
sizes := []struct {
name string
payLen int
mss int
}{
{"64KiB_MSS1460", 65000, 1460},
{"16KiB_MSS1460", 16384, 1460},
{"4KiB_MSS1460", 4096, 1460},
}
for _, sz := range sizes {
b.Run(sz.name, func(b *testing.B) {
const ipLen = 20
const tcpLen = 20
pkt := make([]byte, ipLen+tcpLen+sz.payLen)
pkt[0] = 0x45
binary.BigEndian.PutUint16(pkt[2:4], uint16(ipLen+tcpLen+sz.payLen))
binary.BigEndian.PutUint16(pkt[4:6], 0x4242)
pkt[8] = 64
pkt[9] = unix.IPPROTO_TCP
copy(pkt[12:16], []byte{10, 0, 0, 1})
copy(pkt[16:20], []byte{10, 0, 0, 2})
binary.BigEndian.PutUint16(pkt[20:22], 12345)
binary.BigEndian.PutUint16(pkt[22:24], 80)
binary.BigEndian.PutUint32(pkt[24:28], 10000)
binary.BigEndian.PutUint32(pkt[28:32], 20000)
pkt[32] = 0x50
pkt[33] = 0x18
binary.BigEndian.PutUint16(pkt[34:36], 65535)
for i := 0; i < sz.payLen; i++ {
pkt[ipLen+tcpLen+i] = byte(i)
}
hdr := VirtioNetHdr{
Flags: unix.VIRTIO_NET_HDR_F_NEEDS_CSUM,
GSOType: unix.VIRTIO_NET_HDR_GSO_TCPV4,
HdrLen: uint16(ipLen + tcpLen),
GSOSize: uint16(sz.mss),
CsumStart: uint16(ipLen),
CsumOffset: 16,
}
scratch := make([]byte, tunSegBufSize)
out := make([][]byte, 0, 64)
b.SetBytes(int64(len(pkt)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
out = out[:0]
if err := segmentTCP(pkt, hdr, &out, scratch); err != nil {
b.Fatal(err)
}
}
})
}
}
// TestTunFileWriteVnetHdrNoAlloc verifies the IFF_VNET_HDR fast-path write is
// allocation-free. We write to /dev/null so every call succeeds synchronously.
func TestTunFileWriteVnetHdrNoAlloc(t *testing.T) {
fd, err := unix.Open("/dev/null", os.O_WRONLY, 0)
if err != nil {
t.Fatalf("open /dev/null: %v", err)
}
t.Cleanup(func() { _ = unix.Close(fd) })
tf := &Offload{fd: fd}
tf.writeIovs[0].Base = &validVnetHdr[0]
tf.writeIovs[0].SetLen(virtioNetHdrLen)
payload := make([]byte, 1400)
// Warm up (first call may trigger one-time internal allocations elsewhere).
if _, err := tf.Write(payload); err != nil {
t.Fatalf("Write: %v", err)
}
allocs := testing.AllocsPerRun(1000, func() {
if _, err := tf.Write(payload); err != nil {
t.Fatalf("Write: %v", err)
}
})
if allocs != 0 {
t.Fatalf("Write allocated %.1f times per call, want 0", allocs)
}
}

View File

@@ -0,0 +1,39 @@
package tio
import "encoding/binary"
// Size of the legacy struct virtio_net_hdr that the kernel prepends/expects on
// a TUN opened with IFF_VNET_HDR (TUNSETVNETHDRSZ not set).
const virtioNetHdrLen = 10
type VirtioNetHdr struct {
Flags uint8
GSOType uint8
HdrLen uint16
GSOSize uint16
CsumStart uint16
CsumOffset uint16
}
// decode reads a virtio_net_hdr in host byte order (TUN default; we never
// call TUNSETVNETLE so the kernel matches our endianness).
func (h *VirtioNetHdr) decode(b []byte) {
h.Flags = b[0]
h.GSOType = b[1]
h.HdrLen = binary.NativeEndian.Uint16(b[2:4])
h.GSOSize = binary.NativeEndian.Uint16(b[4:6])
h.CsumStart = binary.NativeEndian.Uint16(b[6:8])
h.CsumOffset = binary.NativeEndian.Uint16(b[8:10])
}
// encode is the inverse of decode: writes the virtio_net_hdr fields into b
// (must be at least virtioNetHdrLen bytes). Used to emit a TSO superpacket
// on egress.
func (h *VirtioNetHdr) encode(b []byte) {
b[0] = h.Flags
b[1] = h.GSOType
binary.NativeEndian.PutUint16(b[2:4], h.HdrLen)
binary.NativeEndian.PutUint16(b[4:6], h.GSOSize)
binary.NativeEndian.PutUint16(b[6:8], h.CsumStart)
binary.NativeEndian.PutUint16(b[8:10], h.CsumOffset)
}
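// Illustrative sketch (hypothetical helper, not used by the offload path):
// how a raw read from an IFF_VNET_HDR tun splits into the virtio header and
// the packet that follows it. n is the byte count returned by read(2).
func splitVnetRead(buf []byte, n int) (VirtioNetHdr, []byte, bool) {
if n < virtioNetHdrLen {
// Too short to carry a header; drop it.
return VirtioNetHdr{}, nil, false
}
var h VirtioNetHdr
h.decode(buf[:virtioNetHdrLen])
return h, buf[virtioNetHdrLen:n], true
}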

View File

@@ -34,6 +34,7 @@ type tun struct {
TXQueueLen int
deviceIndex int
ioctlFd uintptr
vnetHdr bool
Routes atomic.Pointer[[]Route]
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
@@ -72,7 +73,9 @@ type ifreqQLEN struct {
}
func newTunFromFd(c *config.C, l *slog.Logger, deviceFd int, vpnNetworks []netip.Prefix) (*tun, error) {
t, err := newTunGeneric(c, l, deviceFd, vpnNetworks)
// We don't know what flags the caller opened this fd with and can't turn
// on IFF_VNET_HDR after TUNSETIFF, so skip offload on inherited fds.
t, err := newTunGeneric(c, l, deviceFd, false, vpnNetworks)
if err != nil {
return nil, err
}
@@ -117,6 +120,10 @@ func tunSetIff(fd int, name string, flags uint16) (string, error) {
return strings.Trim(string(req.Name[:]), "\x00"), nil
}
// tsoOffloadFlags are the TUN_F_* bits we ask the kernel to enable when a
// TSO-capable TUN is available. CSUM is required as a prerequisite for TSO.
const tsoOffloadFlags = unix.TUN_F_CSUM | unix.TUN_F_TSO4 | unix.TUN_F_TSO6
func newTun(c *config.C, l *slog.Logger, vpnNetworks []netip.Prefix, multiqueue bool) (*tun, error) {
baseFlags := uint16(unix.IFF_TUN | unix.IFF_NO_PI)
if multiqueue {
@@ -124,17 +131,37 @@ func newTun(c *config.C, l *slog.Logger, vpnNetworks []netip.Prefix, multiqueue
}
nameStr := c.GetString("tun.dev", "")
// First try to open with IFF_VNET_HDR + TUNSETOFFLOAD so we can receive
// TSO superpackets. If either step fails (older kernel, unprivileged
// container, etc.) we close and fall back to a plain TUN.
fd, err := openTunDev()
if err != nil {
return nil, err
}
name, err := tunSetIff(fd, nameStr, baseFlags)
vnetHdr := true
name, err := tunSetIff(fd, nameStr, baseFlags|unix.IFF_VNET_HDR)
if err != nil {
_ = unix.Close(fd)
return nil, &NameError{Name: nameStr, Underlying: err}
vnetHdr = false
} else if err = ioctl(uintptr(fd), unix.TUNSETOFFLOAD, uintptr(tsoOffloadFlags)); err != nil {
l.Warn("Failed to enable TUN offload (TSO); proceeding without virtio headers", "error", err)
_ = unix.Close(fd)
vnetHdr = false
}
t, err := newTunGeneric(c, l, fd, vpnNetworks)
if !vnetHdr {
fd, err = openTunDev()
if err != nil {
return nil, err
}
name, err = tunSetIff(fd, nameStr, baseFlags)
if err != nil {
_ = unix.Close(fd)
return nil, &NameError{Name: nameStr, Underlying: err}
}
}
t, err := newTunGeneric(c, l, fd, vnetHdr, vpnNetworks)
if err != nil {
return nil, err
}
@@ -145,8 +172,15 @@ func newTun(c *config.C, l *slog.Logger, vpnNetworks []netip.Prefix, multiqueue
}
// newTunGeneric does all the stuff common to different tun initialization paths. It will close your files on error.
func newTunGeneric(c *config.C, l *slog.Logger, fd int, vpnNetworks []netip.Prefix) (*tun, error) {
container, err := tio.NewPollContainer()
func newTunGeneric(c *config.C, l *slog.Logger, fd int, vnetHdr bool, vpnNetworks []netip.Prefix) (*tun, error) {
var container tio.Container
var err error
if vnetHdr {
container, err = tio.NewOffloadContainer()
} else {
container, err = tio.NewPollContainer()
}
if err != nil {
_ = unix.Close(fd)
return nil, err
@@ -160,6 +194,7 @@ func newTunGeneric(c *config.C, l *slog.Logger, fd int, vpnNetworks []netip.Pref
t := &tun{
readers: container,
closeLock: sync.Mutex{},
vnetHdr: vnetHdr,
vpnNetworks: vpnNetworks,
TXQueueLen: c.GetInt("tun.tx_queue", 500),
useSystemRoutes: c.GetBool("tun.use_system_route_table", false),
@@ -271,11 +306,21 @@ func (t *tun) NewMultiQueueReader() error {
}
flags := uint16(unix.IFF_TUN | unix.IFF_NO_PI | unix.IFF_MULTI_QUEUE)
if t.vnetHdr {
flags |= unix.IFF_VNET_HDR
}
if _, err = tunSetIff(fd, t.Device, flags); err != nil {
_ = unix.Close(fd)
return err
}
if t.vnetHdr {
if err = ioctl(uintptr(fd), unix.TUNSETOFFLOAD, uintptr(tsoOffloadFlags)); err != nil {
_ = unix.Close(fd)
return fmt.Errorf("failed to enable offload on multiqueue tun fd: %w", err)
}
}
err = t.readers.Add(fd)
if err != nil {
_ = unix.Close(fd)

View File

@@ -24,6 +24,43 @@ type StdConn struct {
isV4 bool
l *slog.Logger
batch int
// sendmmsg scratch. Each queue has its own StdConn, so no locking is
// needed. Sized to MaxWriteBatch at construction; WriteBatch chunks
// larger inputs.
writeMsgs []rawMessage
writeIovs []iovec
writeNames [][]byte
// Per-entry UDP_SEGMENT cmsg scratch. writeCmsg is one contiguous slab
// of MaxWriteBatch * writeCmsgSpace bytes; each entry's cmsg header is
// pre-filled once in prepareWriteMessages. WriteBatch only rewrites the
// 2-byte gso_size payload (and toggles Hdr.Control on/off) per call.
writeCmsg []byte
writeCmsgSpace int
// writeEntryEnd[e] is the bufs index *after* the last packet packed
// into mmsghdr entry e. Used to rewind `i` on partial sendmmsg success.
writeEntryEnd []int
// Preallocated closure + in/out slots for sendmmsg, so the hot path
// does not heap-allocate a fresh closure per call.
writeChunk int
writeSent int
writeErrno syscall.Errno
writeFunc func(fd uintptr) bool
// UDP GSO (sendmsg with UDP_SEGMENT cmsg) support. gsoSupported is
// probed once at socket creation. When true, WriteSegmented takes a
// single-syscall GSO path; otherwise it falls back to a WriteTo loop.
gsoSupported bool
// UDP GRO (recvmsg with UDP_GRO cmsg) support. groSupported is probed
// once at socket creation. When true, listenOutBatch allocates larger
// RX buffers and a per-entry cmsg slot so the kernel can coalesce
// consecutive same-flow datagrams into a single recvmmsg entry; the
// delivered cmsg carries the gso_size used to split them back apart.
groSupported bool
}
func setReusePort(network, address string, c syscall.RawConn) error {
@@ -70,9 +107,102 @@ func NewListener(l *slog.Logger, ip netip.Addr, port int, multi bool, batch int)
}
out.isV4 = af == unix.AF_INET
out.prepareWriteMessages(MaxWriteBatch)
out.writeFunc = out.sendmmsgRawWrite
out.prepareGSO()
// GRO delivers coalesced superpackets that need a cmsg to split back
// into segments. The single-packet RX path uses ReadFromUDPAddrPort
// and cannot see that cmsg, so only enable GRO for the batch path.
if batch > 1 {
out.prepareGRO()
}
return out, nil
}
// prepareWriteMessages allocates one mmsghdr/iovec/sockaddr/cmsg scratch
// slot per sendmmsg entry. The iovec slab is sized to the same n so a
// single entry can fan out to up to n iovecs (needed for UDP_SEGMENT runs
// that coalesce consecutive bufs into one entry). Hdr.Iov / Hdr.Iovlen /
// Hdr.Control / Hdr.Controllen are wired per call since each entry can
// span a variable number of iovecs and may or may not carry a cmsg.
func (u *StdConn) prepareWriteMessages(n int) {
u.writeMsgs = make([]rawMessage, n)
u.writeIovs = make([]iovec, n)
u.writeNames = make([][]byte, n)
u.writeEntryEnd = make([]int, n)
u.writeCmsgSpace = unix.CmsgSpace(2)
u.writeCmsg = make([]byte, n*u.writeCmsgSpace)
for k := 0; k < n; k++ {
off := k * u.writeCmsgSpace
h := (*unix.Cmsghdr)(unsafe.Pointer(&u.writeCmsg[off]))
h.Level = unix.SOL_UDP
h.Type = unix.UDP_SEGMENT
setCmsgLen(h, unix.CmsgLen(2))
}
for i := range u.writeMsgs {
u.writeNames[i] = make([]byte, unix.SizeofSockaddrInet6)
u.writeMsgs[i].Hdr.Name = &u.writeNames[i][0]
}
}
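// Worked example of the slab layout (assuming 64-bit Linux, where
// unix.CmsgSpace(2) is 24): entry k's cmsg header lives at writeCmsg[k*24],
// and after construction only the two gso_size payload bytes starting at
// writeCmsg[k*24+unix.CmsgLen(0)] are ever rewritten.
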
// maxGSOSegments caps the per-sendmsg GSO fan-out. Linux kernels have
// historically capped UDP_MAX_SEGMENTS at 64; newer kernels raise it to 128
// but we stay conservative so the same code works everywhere.
const maxGSOSegments = 64
// maxGSOBytes bounds the total payload per sendmsg() when UDP_SEGMENT is
// set. The kernel stitches all iovecs into a single skb, whose length must
// be representable in the 16-bit UDP length field and is also subject to
// sk_gso_max_size (65536 on most devices). We use 65535 so ciphertext plus
// headers always fits, avoiding EMSGSIZE on large TSO superpackets.
const maxGSOBytes = 65535
// prepareGSO probes UDP_SEGMENT support by setting a zero segment size on
// the socket; any failure leaves gsoSupported false.
func (u *StdConn) prepareGSO() {
var probeErr error
if err := u.rawConn.Control(func(fd uintptr) {
probeErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_SEGMENT, 0)
}); err != nil {
return
}
if probeErr != nil {
return
}
u.gsoSupported = true
}
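// Illustrative sketch (hypothetical helper, never called): the single-syscall
// path the probe above unlocks. A payload holding several equal-size
// datagrams to one peer goes out in one sendmsg(2) carrying a UDP_SEGMENT
// cmsg; the kernel splits the payload at segSize-byte boundaries. Assumes
// this file's existing imports.
func sendWithGSO(fd int, sa unix.Sockaddr, payload []byte, segSize int) error {
oob := make([]byte, unix.CmsgSpace(2))
h := (*unix.Cmsghdr)(unsafe.Pointer(&oob[0]))
h.Level = unix.SOL_UDP
h.Type = unix.UDP_SEGMENT
setCmsgLen(h, unix.CmsgLen(2))
binary.NativeEndian.PutUint16(oob[unix.CmsgLen(0):], uint16(segSize))
_, err := unix.SendmsgN(fd, payload, oob, sa, 0)
return err
}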
// udpGROBufferSize sizes the per-entry recvmmsg buffer when UDP_GRO is on.
// The kernel stitches a run of same-flow datagrams into a single skb whose
// length is bounded by sk_gso_max_size (typically 65535); a smaller buffer
// would see the excess truncated (MSG_TRUNC). We use the maximum
// representable UDP length so a full superpacket always lands intact.
const udpGROBufferSize = 65535
// udpGROCmsgPayload is the size of the UDP_GRO cmsg data delivered by the
// kernel: a single int (gso_size in bytes). See udp_cmsg_recv() in
// net/ipv4/udp.c.
const udpGROCmsgPayload = 4
// prepareGRO turns on UDP_GRO so the kernel coalesces consecutive same-flow
// datagrams into one recvmmsg entry, with a cmsg carrying the gso_size used
// to split them back apart on the application side.
func (u *StdConn) prepareGRO() {
var probeErr error
if err := u.rawConn.Control(func(fd uintptr) {
probeErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_GRO, 1)
}); err != nil {
return
}
if probeErr != nil {
return
}
u.groSupported = true
}
func (u *StdConn) SupportsMultipleReaders() bool {
return true
}
@@ -194,7 +324,12 @@ func (u *StdConn) listenOutBatch(r EncReader, flush func()) error {
var operr error
bufSize := MTU
msgs, buffers, names := u.PrepareRawMessages(u.batch, bufSize)
cmsgSpace := 0
if u.groSupported {
bufSize = udpGROBufferSize
cmsgSpace = unix.CmsgSpace(udpGROCmsgPayload)
}
msgs, buffers, names, _ := u.PrepareRawMessages(u.batch, bufSize, cmsgSpace)
//reader needs to capture variables from this function, since it's used as a lambda with rawConn.Read
//defining it outside the loop so it gets re-used
@@ -204,6 +339,11 @@ func (u *StdConn) listenOutBatch(r EncReader, flush func()) error {
}
for {
if cmsgSpace > 0 {
for i := range msgs {
setMsgControllen(&msgs[i].Hdr, cmsgSpace)
}
}
err := u.rawConn.Read(reader)
if err != nil {
return err
@@ -222,7 +362,25 @@ func (u *StdConn) listenOutBatch(r EncReader, flush func()) error {
from := netip.AddrPortFrom(ip.Unmap(), binary.BigEndian.Uint16(names[i][2:4]))
payload := buffers[i][:msgs[i].Len]
r(from, payload)
segSize := 0
if u.groSupported {
segSize = parseUDPGRO(&msgs[i].Hdr)
}
if segSize <= 0 || segSize >= len(payload) {
// No coalescing happened (or a lone datagram).
r(from, payload)
continue
}
// GRO superpacket: the kernel guarantees every segment is
// exactly segSize bytes except for the final one, which may be
// short.
for off := 0; off < len(payload); off += segSize {
end := off + segSize
if end > len(payload) {
end = len(payload)
}
r(from, payload[off:end])
}
}
// End-of-batch: let callers (e.g. TUN write coalescer) flush any
// state they accumulated across this batch.
@@ -230,6 +388,38 @@ func (u *StdConn) listenOutBatch(r EncReader, flush func()) error {
}
}
// parseUDPGRO walks the control buffer on hdr looking for a SOL_UDP/UDP_GRO
// cmsg and returns the gso_size (bytes per coalesced segment) it carries.
// Returns 0 when no UDP_GRO cmsg is present, which is the normal case for
// lone datagrams that the kernel did not coalesce.
func parseUDPGRO(hdr *msghdr) int {
controllen := int(hdr.Controllen)
if controllen < unix.SizeofCmsghdr || hdr.Control == nil {
return 0
}
ctrl := unsafe.Slice(hdr.Control, controllen)
off := 0
for off+unix.SizeofCmsghdr <= len(ctrl) {
ch := (*unix.Cmsghdr)(unsafe.Pointer(&ctrl[off]))
clen := int(ch.Len)
if clen < unix.SizeofCmsghdr || off+clen > len(ctrl) {
return 0
}
if ch.Level == unix.SOL_UDP && ch.Type == unix.UDP_GRO {
dataOff := off + unix.CmsgLen(0)
if dataOff+udpGROCmsgPayload <= len(ctrl) {
return int(int32(binary.NativeEndian.Uint32(ctrl[dataOff : dataOff+udpGROCmsgPayload])))
}
return 0
}
// Advance by the aligned cmsg space. CmsgSpace(n) is the stride
// from one header to the next (len aligned up to the platform's
// cmsg alignment).
off += unix.CmsgSpace(clen - unix.CmsgLen(0))
}
return 0
}
func (u *StdConn) ListenOut(r EncReader, flush func()) error {
if u.batch == 1 {
return u.listenOutSingle(r, flush)
@@ -243,19 +433,209 @@ func (u *StdConn) WriteTo(b []byte, ip netip.AddrPort) error {
return err
}
// WriteBatch sends bufs via sendmmsg(2) using the preallocated scratch on
// StdConn. Consecutive packets to the same destination with matching segment
// sizes (all but possibly the last) are coalesced into a single mmsghdr entry
// carrying a UDP_SEGMENT cmsg, so one syscall can mix runs of GSO superpackets
// with plain one-off datagrams. Without GSO support every packet is its own
// entry, matching the prior behaviour.
//
// Inputs larger than the scratch are split into chunks and sent across
// multiple syscalls. If sendmmsg fails before any entry of a chunk goes out,
// we fall back to per-packet WriteTo for that chunk so the caller still gets
// best-effort delivery.
func (u *StdConn) WriteBatch(bufs [][]byte, addrs []netip.AddrPort) error {
if len(bufs) != len(addrs) {
return fmt.Errorf("WriteBatch: len(bufs)=%d != len(addrs)=%d", len(bufs), len(addrs))
}
//todo use sendmmsg
for i := 0; i < len(bufs); i++ {
if _, err := u.udpConn.WriteToUDPAddrPort(bufs[i], addrs[i]); err != nil {
return err
i := 0
for i < len(bufs) {
baseI := i
entry := 0
iovIdx := 0
for entry < len(u.writeMsgs) && i < len(bufs) {
iovBudget := len(u.writeIovs) - iovIdx
if iovBudget < 1 {
break
}
runLen, segSize := u.planRun(bufs, addrs, i, iovBudget)
if runLen == 0 {
break
}
for k := 0; k < runLen; k++ {
b := bufs[i+k]
if len(b) == 0 {
u.writeIovs[iovIdx+k].Base = nil
setIovLen(&u.writeIovs[iovIdx+k], 0)
} else {
u.writeIovs[iovIdx+k].Base = &b[0]
setIovLen(&u.writeIovs[iovIdx+k], len(b))
}
}
nlen, err := writeSockaddr(u.writeNames[entry], addrs[i], u.isV4)
if err != nil {
return err
}
hdr := &u.writeMsgs[entry].Hdr
hdr.Iov = &u.writeIovs[iovIdx]
setMsgIovlen(hdr, runLen)
hdr.Namelen = uint32(nlen)
if runLen >= 2 {
off := entry * u.writeCmsgSpace
dataOff := off + unix.CmsgLen(0)
binary.NativeEndian.PutUint16(u.writeCmsg[dataOff:dataOff+2], uint16(segSize))
hdr.Control = &u.writeCmsg[off]
setMsgControllen(hdr, u.writeCmsgSpace)
} else {
hdr.Control = nil
setMsgControllen(hdr, 0)
}
i += runLen
iovIdx += runLen
u.writeEntryEnd[entry] = i
entry++
}
if entry == 0 {
return fmt.Errorf("sendmmsg: no progress")
}
sent, serr := u.sendmmsg(entry)
if serr != nil && sent <= 0 {
// Nothing went out for this chunk; fall back to WriteTo for each
// packet that was queued this iteration.
for k := baseI; k < i; k++ {
if werr := u.WriteTo(bufs[k], addrs[k]); werr != nil {
return werr
}
}
continue
}
if sent == 0 {
return fmt.Errorf("sendmmsg made no progress")
}
// Rewind i to the end of the last successfully sent entry. For a
// full-success send this leaves i unchanged; for a partial send it
// replays the remainder on the next outer-loop iteration.
i = u.writeEntryEnd[sent-1]
}
return nil
}
// planRun groups consecutive packets starting at `start` that can be sent as
// a single UDP GSO superpacket (one sendmmsg entry with UDP_SEGMENT cmsg).
// A run of length 1 means the entry carries no cmsg and the kernel treats
// it as a plain datagram. Returns the run length and the per-segment size
// (which equals len(bufs[start])). Without GSO support every call returns
// runLen=1.
func (u *StdConn) planRun(bufs [][]byte, addrs []netip.AddrPort, start, iovBudget int) (int, int) {
if start >= len(bufs) || iovBudget < 1 {
return 0, 0
}
segSize := len(bufs[start])
if !u.gsoSupported || segSize == 0 || segSize > maxGSOBytes {
return 1, segSize
}
dst := addrs[start]
maxLen := maxGSOSegments
if iovBudget < maxLen {
maxLen = iovBudget
}
runLen := 1
total := segSize
for runLen < maxLen && start+runLen < len(bufs) {
nextLen := len(bufs[start+runLen])
if nextLen == 0 || nextLen > segSize {
break
}
if addrs[start+runLen] != dst {
break
}
if total+nextLen > maxGSOBytes {
break
}
total += nextLen
runLen++
if nextLen < segSize {
// A short packet must be the last in the run.
break
}
}
return runLen, segSize
}
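// Worked illustration (assumed traffic, GSO supported): four queued packets
// to one peer with lengths 1300, 1300, 700, 1300. planRun(bufs, addrs, 0, 64)
// returns runLen=3, segSize=1300 (the 700-byte packet is a legal short tail),
// so the first sendmmsg entry carries a UDP_SEGMENT cmsg of 1300 and the
// kernel splits its 3300-byte payload back into 1300/1300/700. The next call
// at start=3 begins a fresh run of length 1 that carries no cmsg.
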
// sendmmsgRawWrite is the preallocated callback passed to rawConn.Write. It
// reads its input (u.writeChunk) and writes its outputs (u.writeSent,
// u.writeErrno) through StdConn fields so the closure itself does not
// capture per-call locals and therefore does not heap-allocate.
func (u *StdConn) sendmmsgRawWrite(fd uintptr) bool {
r1, _, errno := unix.Syscall6(
unix.SYS_SENDMMSG,
fd,
uintptr(unsafe.Pointer(&u.writeMsgs[0])),
uintptr(u.writeChunk),
0,
0,
0,
)
if errno == syscall.EAGAIN || errno == syscall.EWOULDBLOCK {
return false
}
u.writeSent = int(r1)
u.writeErrno = errno
return true
}
func (u *StdConn) sendmmsg(n int) (int, error) {
u.writeChunk = n
u.writeSent = 0
u.writeErrno = 0
if err := u.rawConn.Write(u.writeFunc); err != nil {
return u.writeSent, err
}
if u.writeErrno != 0 {
return u.writeSent, &net.OpError{Op: "sendmmsg", Err: u.writeErrno}
}
return u.writeSent, nil
}
// writeSockaddr encodes addr into buf (which must be at least
// SizeofSockaddrInet6 bytes). Returns the number of bytes used. If isV4 is
// true and addr is not a v4 (or v4-in-v6) address, returns an error.
func writeSockaddr(buf []byte, addr netip.AddrPort, isV4 bool) (int, error) {
ap := addr.Addr().Unmap()
if isV4 {
if !ap.Is4() {
return 0, ErrInvalidIPv6RemoteForSocket
}
// struct sockaddr_in: { sa_family_t(2), in_port_t(2, BE), in_addr(4), zero(8) }
// sa_family is host endian.
binary.NativeEndian.PutUint16(buf[0:2], unix.AF_INET)
binary.BigEndian.PutUint16(buf[2:4], addr.Port())
ip4 := ap.As4()
copy(buf[4:8], ip4[:])
for j := 8; j < 16; j++ {
buf[j] = 0
}
return unix.SizeofSockaddrInet4, nil
}
// struct sockaddr_in6: { sa_family_t(2), in_port_t(2, BE), flowinfo(4), in6_addr(16), scope_id(4) }
binary.NativeEndian.PutUint16(buf[0:2], unix.AF_INET6)
binary.BigEndian.PutUint16(buf[2:4], addr.Port())
binary.NativeEndian.PutUint32(buf[4:8], 0)
ip6 := addr.Addr().As16()
copy(buf[8:24], ip6[:])
binary.NativeEndian.PutUint32(buf[24:28], 0)
return unix.SizeofSockaddrInet6, nil
}
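// Worked example (assuming a little-endian host): for 10.0.0.1:4242 on an
// IPv4 socket, buf[0:8] becomes 02 00 10 92 0a 00 00 01 (AF_INET in host
// order, port 4242 = 0x1092 in network order, then the address), followed by
// eight zero bytes of sin_zero padding.
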
func (u *StdConn) ReloadConfig(c *config.C) {
b := c.GetInt("listen.read_buffer", 0)
if b > 0 {

View File

@@ -30,11 +30,16 @@ type rawMessage struct {
Len uint32
}
func (u *StdConn) PrepareRawMessages(n, bufSize int) ([]rawMessage, [][]byte, [][]byte) {
func (u *StdConn) PrepareRawMessages(n, bufSize, cmsgSpace int) ([]rawMessage, [][]byte, [][]byte, []byte) {
msgs := make([]rawMessage, n)
buffers := make([][]byte, n)
names := make([][]byte, n)
var cmsgs []byte
if cmsgSpace > 0 {
cmsgs = make([]byte, n*cmsgSpace)
}
for i := range msgs {
buffers[i] = make([]byte, bufSize)
names[i] = make([]byte, unix.SizeofSockaddrInet6)
@@ -48,9 +53,30 @@ func (u *StdConn) PrepareRawMessages(n, bufSize int) ([]rawMessage, [][]byte, []
msgs[i].Hdr.Name = &names[i][0]
msgs[i].Hdr.Namelen = uint32(len(names[i]))
if cmsgSpace > 0 {
msgs[i].Hdr.Control = &cmsgs[i*cmsgSpace]
msgs[i].Hdr.Controllen = uint32(cmsgSpace)
}
}
return msgs, buffers, names
return msgs, buffers, names, cmsgs
}
func setIovLen(v *iovec, n int) {
v.Len = uint32(n)
}
func setMsgIovlen(m *msghdr, n int) {
m.Iovlen = uint32(n)
}
func setMsgControllen(m *msghdr, n int) {
m.Controllen = uint32(n)
}
func setCmsgLen(h *unix.Cmsghdr, n int) {
h.Len = uint32(n)
}
func setIovLen(v *iovec, n int) {

View File

@@ -33,11 +33,16 @@ type rawMessage struct {
Pad0 [4]byte
}
func (u *StdConn) PrepareRawMessages(n, bufSize int) ([]rawMessage, [][]byte, [][]byte) {
func (u *StdConn) PrepareRawMessages(n, bufSize, cmsgSpace int) ([]rawMessage, [][]byte, [][]byte, []byte) {
msgs := make([]rawMessage, n)
buffers := make([][]byte, n)
names := make([][]byte, n)
var cmsgs []byte
if cmsgSpace > 0 {
cmsgs = make([]byte, n*cmsgSpace)
}
for i := range msgs {
buffers[i] = make([]byte, bufSize)
names[i] = make([]byte, unix.SizeofSockaddrInet6)
@@ -51,9 +56,30 @@ func (u *StdConn) PrepareRawMessages(n, bufSize int) ([]rawMessage, [][]byte, []
msgs[i].Hdr.Name = &names[i][0]
msgs[i].Hdr.Namelen = uint32(len(names[i]))
if cmsgSpace > 0 {
msgs[i].Hdr.Control = &cmsgs[i*cmsgSpace]
msgs[i].Hdr.Controllen = uint64(cmsgSpace)
}
}
return msgs, buffers, names
return msgs, buffers, names, cmsgs
}
func setIovLen(v *iovec, n int) {
v.Len = uint64(n)
}
func setMsgIovlen(m *msghdr, n int) {
m.Iovlen = uint64(n)
}
func setMsgControllen(m *msghdr, n int) {
m.Controllen = uint64(n)
}
func setCmsgLen(h *unix.Cmsghdr, n int) {
h.Len = uint64(n)
}
func setIovLen(v *iovec, n int) {