Compare commits


16 Commits

Author SHA1 Message Date
Wade Simmons
d6c5c00ef7 Merge remote-tracking branch 'origin/release-1.9' into master
Discard all changes from release-1.9 and keep only master; this is just
to get the 1.9.7 tag into our history.
2025-10-28 15:53:51 -04:00
Nate Brown
7c3f533950 Better words (#1497) 2025-10-10 10:31:46 -05:00
Nate Brown
824cd3f0d6 Update CHANGELOG for Nebula v1.9.7 2025-10-07 21:10:16 -05:00
Nate Brown
9f692175e1 HostInfo.remoteCidr should only be populated with the entire vpn ip address issued in the certificate (#1494) 2025-10-07 17:35:58 -05:00
Nate Brown
22af56f156 Fix recv_error receipt limit allowance for v1.9.x (#1459)
* Fix recv_error receipt limit allowance

* backport #1463 recv_error behavior changes

---------

Co-authored-by: JackDoan <me@jackdoan.com>
2025-09-04 15:52:32 -05:00
brad-defined
1d73e463cd Quietly log error on UDP_NETRESET ioctl on Windows. (#1453)
* Quietly log error on UDP_NETRESET ioctl on Windows.

* dampen unexpected error warnings
2025-08-19 17:33:31 -04:00
brad-defined
105e0ec66c v1.9.6 (#1434)
Update CHANGELOG for Nebula v1.9.6
2025-07-18 08:39:33 -04:00
Nate Brown
4870bb680d Darwin udp fix (#1426) 2025-07-01 16:41:29 -05:00
brad-defined
a1498ca8f8 Store relay states in a slice for consistent ordering (#1422) 2025-06-24 12:04:00 -04:00
Nate Brown
9877648da9 Drop inactive tunnels (#1413) 2025-06-23 11:32:50 -05:00
brad-defined
8e0a7bcbb7 Disable UDP receive error returns due to ICMP messages on Windows. (#1412) 2025-05-22 08:55:45 -04:00
brad-defined
8c29b15c6d fix relay migration panic (#1403) 2025-05-13 14:58:58 -04:00
brad-defined
04d7a8ccba Retry UDP receive on Windows in some receive error cases (#1404) 2025-05-13 14:58:37 -04:00
Nate Brown
b55b9019a7 v1.9.5 (#1285)
Update CHANGELOG for Nebula v1.9.5
2024-12-06 09:50:24 -05:00
Nate Brown
2e85d138cd [v1.9.x] do not panic when loading a V2 CA certificate (#1282)
Co-authored-by: Jack Doan <jackdoan@rivian.com>
2024-12-03 09:49:54 -06:00
brad-defined
9bfdfbafc1 Backport reestablish relays from cert-v2 to release-1.9 (#1277) 2024-11-20 21:49:53 -06:00
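
The merge commit at the top of this list keeps master's tree unchanged while making release-1.9 (and its 1.9.7 tag) reachable in history. A hedged sketch of how such a merge is typically produced, using git's `ours` merge strategy (the exact invocation is an assumption; only the branch names come from the commit message):

    git checkout master
    git merge -s ours origin/release-1.9

With `-s ours` the resulting tree is byte-for-byte master's; the merge exists only to connect the histories so the tag becomes an ancestor.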
6 changed files with 13 additions and 679 deletions

View File

@@ -7,13 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Added
- Experimental Linux UDP offload support: enable `listen.enable_gso` and
`listen.enable_gro` to activate UDP_SEGMENT batching and GRO receive
splitting. Includes automatic capability probing, per-packet fallbacks, and
runtime metrics/logs for visibility.
### Changed
- `default_local_cidr_any` now defaults to false, meaning that any firewall rule
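
For context, the removed entry above refers to config keys read by the deleted code below. A sketch of how those experimental knobs would have been set in a node's YAML config, using the defaults from the deleted source (these keys existed only on the discarded branch):

    listen:
      enable_gso: true
      enable_gro: true
      # optional tuning read by configureGSO; values shown are the defaults
      gso_max_segments: 64
      gso_max_bytes: 64000
      gso_flush_timeout: 100us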

View File

@@ -1,17 +0,0 @@
//go:build linux && (386 || amd64p32 || arm || mips || mipsle) && !android && !e2e_testing
// +build linux
// +build 386 amd64p32 arm mips mipsle
// +build !android
// +build !e2e_testing
package udp
import "golang.org/x/sys/unix"
func controllen(n int) uint32 {
return uint32(n)
}
func setCmsgLen(h *unix.Cmsghdr, n int) {
h.Len = uint32(unix.CmsgLen(n))
}

View File

@@ -1,17 +0,0 @@
//go:build linux && (amd64 || arm64 || ppc64 || ppc64le || mips64 || mips64le || s390x || riscv64 || loong64) && !android && !e2e_testing
// +build linux
// +build amd64 arm64 ppc64 ppc64le mips64 mips64le s390x riscv64 loong64
// +build !android
// +build !e2e_testing
package udp
import "golang.org/x/sys/unix"
func controllen(n int) uint64 {
return uint64(n)
}
func setCmsgLen(h *unix.Cmsghdr, n int) {
h.Len = uint64(unix.CmsgLen(n))
}

View File

@@ -5,14 +5,10 @@ package udp
import (
"encoding/binary"
"errors"
"fmt"
"net"
"net/netip"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/rcrowley/go-metrics"
@@ -21,38 +17,11 @@ import (
"golang.org/x/sys/unix"
)
const (
defaultGSOMaxSegments = 64
defaultGSOMaxBytes = 64000
defaultGROReadBufferSize = 2 * defaultGSOMaxBytes
defaultGSOFlushTimeout = 100 * time.Microsecond
)
type StdConn struct {
sysFd int
isV4 bool
l *logrus.Logger
batch int
enableGRO bool
enableGSO bool
controlLen atomic.Int32
gsoMu sync.Mutex
gsoPendingBuf []byte
gsoPendingSegments int
gsoPendingAddr netip.AddrPort
gsoPendingSegSize int
gsoMaxSegments int
gsoMaxBytes int
gsoFlushTimeout time.Duration
gsoFlushTimer *time.Timer
gsoControlBuf []byte
gsoBatches metrics.Counter
gsoSegments metrics.Counter
groSegments metrics.Counter
}
func maybeIPV4(ip net.IP) (net.IP, bool) {
@@ -100,18 +69,7 @@ func NewListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch in
return nil, fmt.Errorf("unable to bind to socket: %s", err)
}
return &StdConn{
sysFd: fd,
isV4: ip.Is4(),
l: l,
batch: batch,
gsoMaxSegments: defaultGSOMaxSegments,
gsoMaxBytes: defaultGSOMaxBytes,
gsoFlushTimeout: defaultGSOFlushTimeout,
gsoBatches: metrics.GetOrRegisterCounter("udp.gso.batches", nil),
gsoSegments: metrics.GetOrRegisterCounter("udp.gso.segments", nil),
groSegments: metrics.GetOrRegisterCounter("udp.gro.segments", nil),
}, err
return &StdConn{sysFd: fd, isV4: ip.Is4(), l: l, batch: batch}, err
}
func (u *StdConn) Rebind() error {
@@ -163,27 +121,13 @@ func (u *StdConn) LocalAddr() (netip.AddrPort, error) {
func (u *StdConn) ListenOut(r EncReader) {
var ip netip.Addr
msgs, buffers, names, controls := u.PrepareRawMessages(u.batch)
msgs, buffers, names := u.PrepareRawMessages(u.batch)
read := u.ReadMulti
if u.batch == 1 {
read = u.ReadSingle
}
for {
for i := range msgs {
if len(controls) <= i || len(controls[i]) == 0 {
continue
}
msgs[i].Hdr.Controllen = controllen(len(controls[i]))
}
n, err := read(msgs)
if err != nil {
u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
@@ -191,38 +135,13 @@ func (u *StdConn) ListenOut(r EncReader) {
}
for i := 0; i < n; i++ {
payloadLen := int(msgs[i].Len)
if payloadLen == 0 {
continue
}
// It's ok to skip the ok check here; the slicing is the only error that can occur and it will panic
if u.isV4 {
ip, _ = netip.AddrFromSlice(names[i][4:8])
} else {
ip, _ = netip.AddrFromSlice(names[i][8:24])
}
addr := netip.AddrPortFrom(ip.Unmap(), binary.BigEndian.Uint16(names[i][2:4]))
if len(controls) > i && len(controls[i]) > 0 {
if segSize, segCount := u.parseGROSegment(&msgs[i], controls[i]); segSize > 0 && segSize < payloadLen {
if u.emitSegments(r, addr, buffers[i][:payloadLen], segSize, segCount) {
continue
}
if segCount > 1 {
u.l.WithFields(logrus.Fields{
"tag": "gro-debug",
"stage": "listen_out",
"reason": "emit_failed",
"payload_len": payloadLen,
"seg_size": segSize,
"seg_count": segCount,
}).Debug("gro-debug fallback to single packet")
}
}
}
r(addr, buffers[i][:payloadLen])
r(netip.AddrPortFrom(ip.Unmap(), binary.BigEndian.Uint16(names[i][2:4])), buffers[i][:msgs[i].Len])
}
}
}
@@ -269,13 +188,6 @@ func (u *StdConn) ReadMulti(msgs []rawMessage) (int, error) {
}
func (u *StdConn) WriteTo(b []byte, ip netip.AddrPort) error {
if u.enableGSO {
if err := u.writeToGSO(b, ip); err != nil {
return err
}
return nil
}
if u.isV4 {
return u.writeTo4(b, ip)
}
@@ -336,494 +248,6 @@ func (u *StdConn) writeTo4(b []byte, ip netip.AddrPort) error {
}
}
func (u *StdConn) writeToGSO(b []byte, addr netip.AddrPort) error {
if len(b) == 0 {
return nil
}
if !addr.IsValid() {
return u.directWrite(b, addr)
}
u.gsoMu.Lock()
defer u.gsoMu.Unlock()
if cap(u.gsoPendingBuf) < u.gsoMaxBytes { // lazily (re)allocate the pending buffer at the configured capacity
u.gsoPendingBuf = make([]byte, 0, u.gsoMaxBytes)
}
if u.gsoPendingSegments > 0 && u.gsoPendingAddr != addr {
if err := u.flushPendingLocked(); err != nil {
return err
}
}
if len(b) > u.gsoMaxBytes || u.gsoMaxSegments <= 1 {
if err := u.flushPendingLocked(); err != nil {
return err
}
return u.directWrite(b, addr)
}
if u.gsoPendingSegments == 0 {
u.gsoPendingAddr = addr
u.gsoPendingSegSize = len(b)
} else if len(b) != u.gsoPendingSegSize {
// a GSO batch requires equal-size segments (only the last may be short),
// so any size change flushes the pending batch and starts a new one
if err := u.flushPendingLocked(); err != nil {
return err
}
u.gsoPendingAddr = addr
u.gsoPendingSegSize = len(b)
}
inBuf := len(u.gsoPendingBuf) + len(b)
if inBuf > u.gsoMaxBytes {
if err := u.flushPendingLocked(); err != nil {
return err
}
u.gsoPendingAddr = addr
u.gsoPendingSegSize = len(b)
}
u.gsoPendingBuf = append(u.gsoPendingBuf, b...)
u.gsoPendingSegments++
if u.gsoPendingSegments >= u.gsoMaxSegments {
return u.flushPendingLocked()
}
if u.gsoFlushTimeout <= 0 {
return u.flushPendingLocked()
}
u.scheduleFlushLocked(inBuf)
return nil
}
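// Editor's sketch (not part of the diff): the flush conditions above imply an
// effective batch limit of min(gsoMaxSegments, gsoMaxBytes/segSize). With the
// defaults (64 segments, 64000 bytes) and 1300-byte packets the byte cap
// binds first: 64000/1300 = 49 full segments per sendmsg call.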
func (u *StdConn) flushPendingLocked() error {
if u.gsoPendingSegments == 0 {
u.stopFlushTimerLocked()
return nil
}
buf := u.gsoPendingBuf
addr := u.gsoPendingAddr
segSize := u.gsoPendingSegSize
segments := u.gsoPendingSegments
u.stopFlushTimerLocked()
var err error
if segments <= 1 || !u.enableGSO {
err = u.directWrite(buf, addr)
} else {
err = u.sendSegmentedLocked(buf, addr, segSize)
if err != nil && (errors.Is(err, unix.EOPNOTSUPP) || errors.Is(err, unix.ENOTSUP)) {
u.enableGSO = false
u.l.WithError(err).Warn("UDP GSO not supported, disabling")
err = u.sendSequentialLocked(buf, addr, segSize)
}
}
if err == nil && segments > 1 && u.enableGSO {
if u.gsoBatches != nil {
u.gsoBatches.Inc(1)
}
if u.gsoSegments != nil {
u.gsoSegments.Inc(int64(segments))
}
}
u.gsoPendingBuf = u.gsoPendingBuf[:0]
u.gsoPendingSegments = 0
u.gsoPendingSegSize = 0
u.gsoPendingAddr = netip.AddrPort{}
return err
}
func (u *StdConn) sendSegmentedLocked(buf []byte, addr netip.AddrPort, segSize int) error {
if len(buf) == 0 {
return nil
}
if segSize <= 0 {
segSize = len(buf)
}
if len(u.gsoControlBuf) < unix.CmsgSpace(2) {
u.gsoControlBuf = make([]byte, unix.CmsgSpace(2))
}
control := u.gsoControlBuf[:unix.CmsgSpace(2)]
for i := range control {
control[i] = 0
}
hdr := (*unix.Cmsghdr)(unsafe.Pointer(&control[0]))
setCmsgLen(hdr, 2)
hdr.Level = unix.SOL_UDP
hdr.Type = unix.UDP_SEGMENT
dataOff := unix.CmsgLen(0)
binary.NativeEndian.PutUint16(control[dataOff:dataOff+2], uint16(segSize))
var sa unix.Sockaddr
if u.isV4 {
sa4 := &unix.SockaddrInet4{Port: int(addr.Port())}
sa4.Addr = addr.Addr().As4()
sa = sa4
} else {
sa6 := &unix.SockaddrInet6{Port: int(addr.Port())}
sa6.Addr = addr.Addr().As16()
sa = sa6
}
for {
n, err := unix.SendmsgN(u.sysFd, buf, control[:unix.CmsgSpace(2)], sa, 0)
if err != nil {
if err == unix.EINTR {
continue
}
return &net.OpError{Op: "sendmsg", Err: err}
}
if n != len(buf) {
return &net.OpError{Op: "sendmsg", Err: unix.EIO}
}
return nil
}
}
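// Editor's sketch (not part of the diff): control-message sizing as used
// above, assuming 64-bit Linux. unix.CmsgLen(0) is the bare 16-byte cmsghdr,
// so the 2-byte UDP_SEGMENT value is written at offset 16, and
// unix.CmsgSpace(2) pads the whole message to an aligned 24 bytes.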
func (u *StdConn) sendSequentialLocked(buf []byte, addr netip.AddrPort, segSize int) error {
if len(buf) == 0 {
return nil
}
if segSize <= 0 {
segSize = len(buf)
}
for offset := 0; offset < len(buf); offset += segSize {
end := offset + segSize
if end > len(buf) {
end = len(buf)
}
var err error
if u.isV4 {
err = u.writeTo4(buf[offset:end], addr)
} else {
err = u.writeTo6(buf[offset:end], addr)
}
if err != nil {
return err
}
if end == len(buf) {
break
}
}
return nil
}
func (u *StdConn) scheduleFlushLocked(inBuf int) {
if u.gsoFlushTimeout <= 0 {
_ = u.flushPendingLocked()
return
}
t := u.gsoFlushTimeout
if inBuf > u.gsoMaxBytes/2 {
t = t / 2
}
if u.gsoFlushTimer == nil {
u.gsoFlushTimer = time.AfterFunc(t, u.flushTimerHandler)
return
}
// Stop may report false if the timer already fired or its handler is
// running; the handler exits on its own when nothing is pending, so it
// is safe to Reset here either way.
u.gsoFlushTimer.Stop()
u.gsoFlushTimer.Reset(t)
}
func (u *StdConn) stopFlushTimerLocked() {
if u.gsoFlushTimer != nil {
u.gsoFlushTimer.Stop()
}
}
func (u *StdConn) flushTimerHandler() {
u.gsoMu.Lock()
defer u.gsoMu.Unlock()
if u.gsoPendingSegments == 0 {
return
}
if err := u.flushPendingLocked(); err != nil {
u.l.WithError(err).Warn("Failed to flush GSO batch")
}
}
func (u *StdConn) directWrite(b []byte, addr netip.AddrPort) error {
if u.isV4 {
return u.writeTo4(b, addr)
}
return u.writeTo6(b, addr)
}
func (u *StdConn) emitSegments(r EncReader, addr netip.AddrPort, payload []byte, segSize, segCount int) bool {
if segSize <= 0 || segSize >= len(payload) {
if u.l.Level >= logrus.DebugLevel {
u.l.WithFields(logrus.Fields{
"tag": "gro-debug",
"stage": "emit",
"reason": "invalid_seg_size",
"payload_len": len(payload),
"seg_size": segSize,
"seg_count": segCount,
}).Debug("gro-debug skip emit")
}
return false
}
totalLen := len(payload)
if segCount <= 0 {
segCount = (totalLen + segSize - 1) / segSize
}
if segCount <= 1 {
if u.l.Level >= logrus.DebugLevel {
u.l.WithFields(logrus.Fields{
"tag": "gro-debug",
"stage": "emit",
"reason": "single_segment",
"payload_len": totalLen,
"seg_size": segSize,
"seg_count": segCount,
}).Debug("gro-debug skip emit")
}
return false
}
start := 0
numSegments := 0
for start < totalLen && numSegments < segCount {
end := start + segSize
if end > totalLen {
end = totalLen
}
r(addr, payload[start:end])
numSegments++
start = end
}
if u.groSegments != nil {
u.groSegments.Inc(int64(numSegments))
}
return true
}
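// Editor's sketch (not part of the diff): a worked example of the split
// above. A 3000-byte coalesced payload with segSize=1300 and segCount=3
// invokes r three times, with 1300-, 1300-, and 400-byte segments; only the
// final segment may be shorter than segSize.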
func (u *StdConn) parseGROSegment(msg *rawMessage, control []byte) (int, int) {
ctrlLen := int(msg.Hdr.Controllen)
if ctrlLen <= 0 {
return 0, 0
}
if ctrlLen > len(control) {
ctrlLen = len(control)
}
cmsgs, err := unix.ParseSocketControlMessage(control[:ctrlLen])
if err != nil {
u.l.WithError(err).Debug("failed to parse UDP GRO control message")
return 0, 0
}
for _, c := range cmsgs {
if c.Header.Level == unix.SOL_UDP && c.Header.Type == unix.UDP_GRO && len(c.Data) >= 2 {
segSize := int(binary.NativeEndian.Uint16(c.Data[:2]))
segCount := 0
if len(c.Data) >= 4 {
segCount = int(binary.NativeEndian.Uint16(c.Data[2:4]))
}
if u.l.Level >= logrus.DebugLevel {
u.l.WithFields(logrus.Fields{
"tag": "gro-debug",
"stage": "parse",
"seg_size": segSize,
"seg_count": segCount,
}).Debug("gro-debug control parsed")
}
return segSize, segCount
}
}
return 0, 0
}
func (u *StdConn) configureGRO(enable bool) {
if enable == u.enableGRO {
if enable {
u.controlLen.Store(int32(unix.CmsgSpace(2)))
} else {
u.controlLen.Store(0)
}
return
}
if enable {
if err := unix.SetsockoptInt(u.sysFd, unix.SOL_UDP, unix.UDP_GRO, 1); err != nil {
u.l.WithError(err).Warn("Failed to enable UDP GRO")
u.enableGRO = false
u.controlLen.Store(0)
return
}
u.enableGRO = true
u.controlLen.Store(int32(unix.CmsgSpace(2)))
u.l.Info("UDP GRO enabled")
} else {
if u.enableGRO {
if err := unix.SetsockoptInt(u.sysFd, unix.SOL_UDP, unix.UDP_GRO, 0); err != nil {
u.l.WithError(err).Warn("Failed to disable UDP GRO")
}
}
u.enableGRO = false
u.controlLen.Store(0)
}
}
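// Editor's sketch (not part of the diff): the removed changelog entry
// mentions "automatic capability probing". A minimal standalone probe built
// only from the UDP_GRO sockopt used by configureGRO above; the probeGRO
// name and this program are illustrative assumptions, not from the branch.
//
//	package main
//
//	import (
//		"fmt"
//
//		"golang.org/x/sys/unix"
//	)
//
//	// probeGRO reports whether the kernel accepts UDP_GRO on a scratch
//	// socket. UDP_GRO landed in Linux 5.0; older kernels reject it.
//	func probeGRO() bool {
//		fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_UDP)
//		if err != nil {
//			return false
//		}
//		defer unix.Close(fd)
//		return unix.SetsockoptInt(fd, unix.SOL_UDP, unix.UDP_GRO, 1) == nil
//	}
//
//	func main() {
//		fmt.Println("UDP_GRO supported:", probeGRO())
//	}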
func (u *StdConn) configureGSO(enable bool, c *config.C) {
u.gsoMu.Lock()
defer u.gsoMu.Unlock()
if !enable {
if u.enableGSO {
if err := u.flushPendingLocked(); err != nil {
u.l.WithError(err).Warn("Failed to flush GSO buffers while disabling")
}
u.enableGSO = false
if u.gsoFlushTimer != nil {
u.gsoFlushTimer.Stop()
}
u.l.Info("UDP GSO disabled")
}
return
}
maxSegments := c.GetInt("listen.gso_max_segments", defaultGSOMaxSegments)
if maxSegments < 2 {
maxSegments = 2
}
maxBytes := c.GetInt("listen.gso_max_bytes", 0)
if maxBytes <= 0 {
maxBytes = defaultGSOMaxBytes
}
if maxBytes < MTU {
maxBytes = MTU
}
flushTimeout := c.GetDuration("listen.gso_flush_timeout", defaultGSOFlushTimeout)
if flushTimeout < 0 {
flushTimeout = 0
}
u.enableGSO = true
u.gsoMaxSegments = maxSegments
u.gsoMaxBytes = maxBytes
u.gsoFlushTimeout = flushTimeout
if cap(u.gsoPendingBuf) < u.gsoMaxBytes {
u.gsoPendingBuf = make([]byte, 0, u.gsoMaxBytes)
} else {
u.gsoPendingBuf = u.gsoPendingBuf[:0]
}
if len(u.gsoControlBuf) < unix.CmsgSpace(2) {
u.gsoControlBuf = make([]byte, unix.CmsgSpace(2))
}
u.l.WithFields(logrus.Fields{
"segments": u.gsoMaxSegments,
"bytes": u.gsoMaxBytes,
"flush_timeout": u.gsoFlushTimeout,
}).Info("UDP GSO configured")
}
func (u *StdConn) ReloadConfig(c *config.C) {
b := c.GetInt("listen.read_buffer", 0)
if b > 0 {
@@ -870,9 +294,6 @@ func (u *StdConn) ReloadConfig(c *config.C) {
u.l.WithError(err).Error("Failed to set listen.so_mark")
}
}
u.configureGRO(c.GetBool("listen.enable_gro", false))
u.configureGSO(c.GetBool("listen.enable_gso", false), c)
}
func (u *StdConn) getMemInfo(meminfo *[unix.SK_MEMINFO_VARS]uint32) error {
@@ -885,15 +306,7 @@ func (u *StdConn) getMemInfo(meminfo *[unix.SK_MEMINFO_VARS]uint32) error {
}
func (u *StdConn) Close() error {
u.gsoMu.Lock()
flushErr := u.flushPendingLocked()
u.gsoMu.Unlock()
closeErr := syscall.Close(u.sysFd)
if flushErr != nil {
return flushErr
}
return closeErr
return syscall.Close(u.sysFd)
}
func NewUDPStatsEmitter(udpConns []Conn) func() {

View File

@@ -30,24 +30,13 @@ type rawMessage struct {
Len uint32
}
func (u *StdConn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte, [][]byte) {
controlLen := int(u.controlLen.Load())
func (u *StdConn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte) {
msgs := make([]rawMessage, n)
buffers := make([][]byte, n)
names := make([][]byte, n)
var controls [][]byte
if controlLen > 0 {
controls = make([][]byte, n)
}
for i := range msgs {
size := MTU
if defaultGROReadBufferSize > size {
size = defaultGROReadBufferSize
}
buffers[i] = make([]byte, size)
buffers[i] = make([]byte, MTU)
names[i] = make([]byte, unix.SizeofSockaddrInet6)
vs := []iovec{
@@ -59,16 +48,7 @@ func (u *StdConn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte, [
msgs[i].Hdr.Name = &names[i][0]
msgs[i].Hdr.Namelen = uint32(len(names[i]))
if controlLen > 0 {
controls[i] = make([]byte, controlLen)
msgs[i].Hdr.Control = &controls[i][0]
msgs[i].Hdr.Controllen = controllen(len(controls[i]))
} else {
msgs[i].Hdr.Control = nil
msgs[i].Hdr.Controllen = controllen(0)
}
}
return msgs, buffers, names, controls
return msgs, buffers, names
}

View File

@@ -33,43 +33,25 @@ type rawMessage struct {
Pad0 [4]byte
}
func (u *StdConn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte, [][]byte) {
controlLen := int(u.controlLen.Load())
func (u *StdConn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte) {
msgs := make([]rawMessage, n)
buffers := make([][]byte, n)
names := make([][]byte, n)
var controls [][]byte
if controlLen > 0 {
controls = make([][]byte, n)
}
for i := range msgs {
size := MTU
if defaultGROReadBufferSize > size {
size = defaultGROReadBufferSize
}
buffers[i] = make([]byte, size)
buffers[i] = make([]byte, MTU)
names[i] = make([]byte, unix.SizeofSockaddrInet6)
vs := []iovec{{Base: &buffers[i][0], Len: uint64(len(buffers[i]))}}
vs := []iovec{
{Base: &buffers[i][0], Len: uint64(len(buffers[i]))},
}
msgs[i].Hdr.Iov = &vs[0]
msgs[i].Hdr.Iovlen = uint64(len(vs))
msgs[i].Hdr.Name = &names[i][0]
msgs[i].Hdr.Namelen = uint32(len(names[i]))
if controlLen > 0 {
controls[i] = make([]byte, controlLen)
msgs[i].Hdr.Control = &controls[i][0]
msgs[i].Hdr.Controllen = controllen(len(controls[i]))
} else {
msgs[i].Hdr.Control = nil
msgs[i].Hdr.Controllen = controllen(0)
}
}
return msgs, buffers, names, controls
return msgs, buffers, names
}