mirror of
https://github.com/slackhq/nebula.git
synced 2026-05-16 04:47:38 +02:00
broken checkpt
This commit is contained in:
@@ -147,44 +147,3 @@ func mergeECNIntoSeed(seedHdr, pktHdr []byte, isV6 bool) {
|
||||
seedHdr[1] |= pktHdr[1] & 0x03
|
||||
}
|
||||
}
|
||||
|
||||
// Arena is an injectable byte-slab that hands out non-overlapping borrowed
// slices via Reserve and releases them in bulk via Reset. Coalescers take
// an *Arena at construction so the caller controls the slab lifetime and
// can share one slab across multiple coalescers (MultiCoalescer hands the
// same *Arena to every lane so the lanes don't carry their own backings).
//
// Reserve borrows; the slice is valid until the next Reset. The slab grows
// (by allocating a fresh, larger backing array) if a Reserve doesn't fit;
// pre-size the arena via NewArena to avoid that path on the hot path.
type Arena struct {
	buf []byte
}

// NewArena returns an Arena whose backing array is pre-allocated to hold
// capacity bytes. A capacity of 0 is fine for callers that never Reserve
// (e.g. a test that only feeds the coalescer pre-made []byte packets via
// Commit).
func NewArena(capacity int) *Arena {
	return &Arena{buf: make([]byte, 0, capacity)}
}

// Reserve hands out a non-overlapping sz-byte slice borrowed from the
// arena. When the request exceeds the remaining room, a fresh backing is
// allocated at the larger of double the current capacity or sz; slices
// borrowed earlier still point at the old backing and stay usable until
// Reset. The returned slice is full-sliced (cap == sz) so an append by the
// caller cannot stomp a neighboring reservation.
func (ar *Arena) Reserve(sz int) []byte {
	need := len(ar.buf) + sz
	if need > cap(ar.buf) {
		// Grow geometrically, but never below the request itself.
		grown := cap(ar.buf) * 2
		if grown < sz {
			grown = sz
		}
		ar.buf = make([]byte, 0, grown)
		need = sz
	}
	off := need - sz
	ar.buf = ar.buf[:need]
	return ar.buf[off:need:need]
}

// Reset releases every slice handed out since the last Reset. Callers must
// not use any previously-borrowed slice after this returns. The backing
// array is kept, so subsequent Reserves reuse it without re-allocating.
func (ar *Arena) Reset() {
	ar.buf = ar.buf[:0]
}
|
||||
|
||||
@@ -2,8 +2,10 @@ package batch
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"log/slog"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/tio"
|
||||
"github.com/slackhq/nebula/util"
|
||||
)
|
||||
|
||||
// MultiCoalescer fans plaintext packets out to lane-specific batchers based
|
||||
@@ -35,7 +37,7 @@ type MultiCoalescer struct {
|
||||
// sequentially and never Reserves in between, so a later lane's
|
||||
// slots stay readable across an earlier lane's Reset (the underlying
|
||||
// bytes are still alive — Reset only re-slices len to 0).
|
||||
arena *Arena
|
||||
arena *util.Arena
|
||||
}
|
||||
|
||||
// DefaultMultiArenaCap is the recommended arena capacity for a Multi-lane
|
||||
@@ -49,9 +51,9 @@ const DefaultMultiArenaCap = initialSlots * 65535
|
||||
// Either lane disabled redirects its traffic into the passthrough lane.
|
||||
// arena is the single backing slab shared across every lane; the caller
|
||||
// pre-sizes it via NewArena so the hot path never allocates.
|
||||
func NewMultiCoalescer(w io.Writer, l *slog.Logger, arena *Arena, tcpEnabled, udpEnabled bool) *MultiCoalescer {
|
||||
func NewMultiCoalescer(w tio.Queue, l *slog.Logger, arena *util.Arena, tcpEnabled, udpEnabled bool) *MultiCoalescer {
|
||||
m := &MultiCoalescer{
|
||||
pt: NewPassthrough(w, arena),
|
||||
pt: NewPassthrough(w, initialSlots, arena),
|
||||
arena: arena,
|
||||
}
|
||||
if tcpEnabled {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/slackhq/nebula/udp"
|
||||
"github.com/slackhq/nebula/util"
|
||||
)
|
||||
|
||||
// Passthrough is a RxBatcher that doesn't batch anything, it just accumulates and then sends packets.
|
||||
@@ -11,7 +12,7 @@ type Passthrough struct {
|
||||
out io.Writer
|
||||
slots [][]byte
|
||||
// arena is injected; see TCPCoalescer.arena for the contract.
|
||||
arena *Arena
|
||||
arena *util.Arena
|
||||
cursor int
|
||||
}
|
||||
|
||||
@@ -21,7 +22,7 @@ const passthroughBaseNumSlots = 128
|
||||
// standalone Passthrough batcher: 128 slots × udp.MTU ≈ 1.1 MiB.
|
||||
const DefaultPassthroughArenaCap = passthroughBaseNumSlots * udp.MTU
|
||||
|
||||
func NewPassthrough(w io.Writer, slots int, arena *Arena) *Passthrough {
|
||||
func NewPassthrough(w io.Writer, slots int, arena *util.Arena) *Passthrough {
|
||||
return &Passthrough{
|
||||
out: w,
|
||||
slots: make([][]byte, 0, slots),
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"slices"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/tio"
|
||||
"github.com/slackhq/nebula/util"
|
||||
"github.com/slackhq/nebula/wire"
|
||||
)
|
||||
|
||||
// ipProtoTCP is the IANA protocol number for TCP. Hardcoded instead of
|
||||
@@ -88,11 +90,11 @@ type TCPCoalescer struct {
|
||||
// and tells it to release them via Reset on Flush. When wrapped in
|
||||
// MultiCoalescer the same *Arena is shared with the other lanes so
|
||||
// there's exactly one backing slab per Multi instance.
|
||||
arena *Arena
|
||||
arena *util.Arena
|
||||
l *slog.Logger
|
||||
}
|
||||
|
||||
func NewTCPCoalescer(w io.Writer, l *slog.Logger, arena *Arena) *TCPCoalescer {
|
||||
func NewTCPCoalescer(w tio.Queue, l *slog.Logger, arena *util.Arena) *TCPCoalescer {
|
||||
c := &TCPCoalescer{
|
||||
plainW: w,
|
||||
slots: make([]*coalesceSlot, 0, initialSlots),
|
||||
@@ -101,7 +103,7 @@ func NewTCPCoalescer(w io.Writer, l *slog.Logger, arena *Arena) *TCPCoalescer {
|
||||
arena: arena,
|
||||
l: l,
|
||||
}
|
||||
if gw, ok := tio.SupportsGSO(w, tio.GSOProtoTCP); ok {
|
||||
if gw, ok := tio.SupportsGSO(w, wire.GSOProtoTCP); ok {
|
||||
c.gsoW = gw
|
||||
}
|
||||
return c
|
||||
@@ -419,7 +421,7 @@ func (c *TCPCoalescer) flushSlot(s *coalesceSlot) error {
|
||||
tcsum := s.ipHdrLen + 16
|
||||
binary.BigEndian.PutUint16(hdr[tcsum:tcsum+2], foldOnceNoInvert(psum))
|
||||
|
||||
return c.gsoW.WriteGSO(hdr[:s.ipHdrLen], hdr[s.ipHdrLen:], s.payIovs, tio.GSOProtoTCP)
|
||||
return c.gsoW.WriteGSO(hdr[:s.ipHdrLen], hdr[s.ipHdrLen:], s.payIovs, wire.GSOProtoTCP)
|
||||
}
|
||||
|
||||
// headersMatch compares two IP+TCP header prefixes for byte-for-byte
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/tio"
|
||||
"github.com/slackhq/nebula/util"
|
||||
"github.com/slackhq/nebula/wire"
|
||||
)
|
||||
|
||||
// ipProtoUDP is the IANA protocol number for UDP.
|
||||
@@ -67,7 +69,7 @@ type UDPCoalescer struct {
|
||||
pool []*udpSlot
|
||||
|
||||
// arena is injected; see TCPCoalescer.arena for the contract.
|
||||
arena *Arena
|
||||
arena *util.Arena
|
||||
}
|
||||
|
||||
// NewUDPCoalescer wraps w. The caller is responsible for only constructing
|
||||
@@ -75,7 +77,7 @@ type UDPCoalescer struct {
|
||||
// the kernel may reject GSO_UDP_L4 writes. If w does not implement
|
||||
// tio.GSOWriter at all (single-packet Queue), the coalescer degrades to
|
||||
// plain Writes — same defensive shape as the TCP coalescer.
|
||||
func NewUDPCoalescer(w io.Writer, arena *Arena) *UDPCoalescer {
|
||||
func NewUDPCoalescer(w tio.Queue, arena *util.Arena) *UDPCoalescer {
|
||||
c := &UDPCoalescer{
|
||||
plainW: w,
|
||||
slots: make([]*udpSlot, 0, initialSlots),
|
||||
@@ -83,7 +85,7 @@ func NewUDPCoalescer(w io.Writer, arena *Arena) *UDPCoalescer {
|
||||
pool: make([]*udpSlot, 0, initialSlots),
|
||||
arena: arena,
|
||||
}
|
||||
if gw, ok := tio.SupportsGSO(w, tio.GSOProtoUDP); ok {
|
||||
if gw, ok := tio.SupportsGSO(w, wire.GSOProtoUDP); ok {
|
||||
c.gsoW = gw
|
||||
}
|
||||
return c
|
||||
@@ -313,7 +315,7 @@ func (c *UDPCoalescer) flushSlot(s *udpSlot) error {
|
||||
udpCsumOff := s.ipHdrLen + 6
|
||||
binary.BigEndian.PutUint16(hdr[udpCsumOff:udpCsumOff+2], foldOnceNoInvert(psum))
|
||||
|
||||
return c.gsoW.WriteGSO(hdr[:s.ipHdrLen], hdr[s.ipHdrLen:], s.payIovs, tio.GSOProtoUDP)
|
||||
return c.gsoW.WriteGSO(hdr[:s.ipHdrLen], hdr[s.ipHdrLen:], s.payIovs, wire.GSOProtoUDP)
|
||||
}
|
||||
|
||||
// udpHeadersMatch compares two IP+UDP header prefixes for byte-equality on
|
||||
|
||||
Reference in New Issue
Block a user