Mirror of https://github.com/slackhq/nebula.git, synced 2025-11-22 16:34:25 +01:00

Compare commits: synctrace ... batched-pa (46 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 2d128a3254 |  |
|  | c8980d34cf |  |
|  | 98f264cf14 |  |
|  | aa44f4c7c9 |  |
|  | 419157c407 |  |
|  | 0864852d33 |  |
|  | 2b5aec9a18 |  |
|  | f0665bee20 |  |
|  | 11da0baab1 |  |
|  | 608904b9dd |  |
|  | fd1c52127f |  |
|  | 01909f4715 |  |
|  | 770147264d |  |
|  | fa8c013b97 |  |
|  | 2710f2af06 |  |
|  | ad6d3e6bac |  |
|  | 2b0aa74e85 |  |
|  | b126d88963 |  |
|  | 45c1d3eab3 |  |
|  | 634181ba66 |  |
|  | eb89839d13 |  |
|  | fb7f0c3657 |  |
|  | b1f53d8d25 |  |
|  | 8824eeaea2 |  |
|  | 071589f7c7 |  |
|  | f1e992f6dd |  |
|  | 1ea5f776d7 |  |
|  | 4cdeb284ef |  |
|  | 5cccd39465 |  |
|  | 8196c22b5a |  |
|  | 65cc253c19 |  |
|  | 73cfa7b5b1 |  |
|  | 768325c9b4 |  |
|  | 932e329164 |  |
|  | 4bea299265 |  |
|  | 5cff83b282 |  |
|  | 7da79685ff |  |
|  | 91eff03418 |  |
|  | 52623820c2 |  |
|  | c2420642a0 |  |
|  | b3a1f7b0a3 |  |
|  | 94142aded5 |  |
|  | b158eb0c4c |  |
|  | e4b7dbcfb0 |  |
|  | 882edf11d7 |  |
|  | d34c2b8e06 |  |
.github/ISSUE_TEMPLATE/config.yml (vendored, 2 changes)

```diff
@@ -17,5 +17,5 @@ contact_links:
     about: 'The documentation is the best place to start if you are new to Nebula.'

   - name: 💁 Support/Chat
-    url: https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ
+    url: https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA
     about: 'For faster support, join us on Slack for assistance!'
```
.github/workflows/gofmt.yml (vendored, 4 changes)

```diff
@@ -16,9 +16,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Install goimports
```
.github/workflows/release.yml (vendored, 12 changes)

```diff
@@ -12,9 +12,9 @@ jobs:
     steps:
       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Build
@@ -35,9 +35,9 @@ jobs:
     steps:
       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Build
@@ -68,9 +68,9 @@ jobs:
     steps:
       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Import certificates
```
.github/workflows/smoke-extra.yml (vendored, 4 changes)

```diff
@@ -22,9 +22,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version-file: 'go.mod'
+          go-version: '1.25'
           check-latest: true

       - name: add hashicorp source
```
.github/workflows/smoke.yml (vendored, 4 changes)

```diff
@@ -20,9 +20,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: build
```
.github/workflows/test.yml (vendored, 24 changes)

```diff
@@ -20,9 +20,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Build
@@ -32,9 +32,9 @@ jobs:
         run: make vet

       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v7
+        uses: golangci/golangci-lint-action@v8
         with:
-          version: v2.0
+          version: v2.5

       - name: Test
         run: make test
@@ -58,9 +58,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Build
@@ -79,9 +79,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.22'
+          go-version: '1.25'
           check-latest: true

       - name: Build
@@ -100,9 +100,9 @@ jobs:

       - uses: actions/checkout@v4

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
         with:
-          go-version: '1.24'
+          go-version: '1.25'
           check-latest: true

       - name: Build nebula
@@ -115,9 +115,9 @@ jobs:
         run: make vet

       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v7
+        uses: golangci/golangci-lint-action@v8
         with:
-          version: v2.0
+          version: v2.5

       - name: Test
         run: make test
```
Makefile (3 changes)

```diff
@@ -61,7 +61,7 @@ ALL = $(ALL_LINUX) \
 	windows-arm64

 e2e:
-	$(TEST_ENV) go test -tags=synctrace,e2e_testing -count=1 $(TEST_FLAGS) ./e2e
+	$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e

 e2ev: TEST_FLAGS += -v
 e2ev: e2e
@@ -215,7 +215,6 @@ ifeq ($(words $(MAKECMDGOALS)),1)
 	@$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
 endif

-bin-docker: BUILD_ARGS = -tags=synctrace
 bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert

 smoke-docker: bin-docker
```
```diff
@@ -12,7 +12,7 @@ Further documentation can be found [here](https://nebula.defined.net/docs/).

 You can read more about Nebula [here](https://medium.com/p/884110a5579).

-You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ).
+You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA).

 ## Supported Platforms

```
batch_pipeline.go (new file, 164 lines)

```go
package nebula

import (
	"net/netip"

	"github.com/slackhq/nebula/overlay"
	"github.com/slackhq/nebula/udp"
)

// batchPipelines tracks whether the inside device can operate on packet batches
// and, if so, holds the shared packet pool sized for the virtio headroom and
// payload limits advertised by the device. It also owns the fan-in/fan-out
// queues between the TUN readers, encrypt/decrypt workers, and the UDP writers.
type batchPipelines struct {
	enabled    bool
	inside     overlay.BatchCapableDevice
	headroom   int
	payloadCap int
	pool       *overlay.PacketPool
	batchSize  int
	routines   int
	rxQueues   []chan *overlay.Packet
	txQueues   []chan queuedDatagram
	tunQueues  []chan *overlay.Packet
}

type queuedDatagram struct {
	packet *overlay.Packet
	addr   netip.AddrPort
}

func (bp *batchPipelines) init(device overlay.Device, routines int, queueDepth int, maxSegments int) {
	if device == nil || routines <= 0 {
		return
	}
	bcap, ok := device.(overlay.BatchCapableDevice)
	if !ok {
		return
	}
	headroom := bcap.BatchHeadroom()
	payload := bcap.BatchPayloadCap()
	if maxSegments < 1 {
		maxSegments = 1
	}
	requiredPayload := udp.MTU * maxSegments
	if payload < requiredPayload {
		payload = requiredPayload
	}
	batchSize := bcap.BatchSize()
	if headroom <= 0 || payload <= 0 || batchSize <= 0 {
		return
	}
	bp.enabled = true
	bp.inside = bcap
	bp.headroom = headroom
	bp.payloadCap = payload
	bp.batchSize = batchSize
	bp.routines = routines
	bp.pool = overlay.NewPacketPool(headroom, payload)
	queueCap := batchSize * defaultBatchQueueDepthFactor
	if queueDepth > 0 {
		queueCap = queueDepth
	}
	if queueCap < batchSize {
		queueCap = batchSize
	}
	bp.rxQueues = make([]chan *overlay.Packet, routines)
	bp.txQueues = make([]chan queuedDatagram, routines)
	bp.tunQueues = make([]chan *overlay.Packet, routines)
	for i := 0; i < routines; i++ {
		bp.rxQueues[i] = make(chan *overlay.Packet, queueCap)
		bp.txQueues[i] = make(chan queuedDatagram, queueCap)
		bp.tunQueues[i] = make(chan *overlay.Packet, queueCap)
	}
}

func (bp *batchPipelines) Pool() *overlay.PacketPool {
	if bp == nil || !bp.enabled {
		return nil
	}
	return bp.pool
}

func (bp *batchPipelines) Enabled() bool {
	return bp != nil && bp.enabled
}

func (bp *batchPipelines) batchSizeHint() int {
	if bp == nil || bp.batchSize <= 0 {
		return 1
	}
	return bp.batchSize
}

func (bp *batchPipelines) rxQueue(i int) chan *overlay.Packet {
	if bp == nil || !bp.enabled || i < 0 || i >= len(bp.rxQueues) {
		return nil
	}
	return bp.rxQueues[i]
}

func (bp *batchPipelines) txQueue(i int) chan queuedDatagram {
	if bp == nil || !bp.enabled || i < 0 || i >= len(bp.txQueues) {
		return nil
	}
	return bp.txQueues[i]
}

func (bp *batchPipelines) tunQueue(i int) chan *overlay.Packet {
	if bp == nil || !bp.enabled || i < 0 || i >= len(bp.tunQueues) {
		return nil
	}
	return bp.tunQueues[i]
}

func (bp *batchPipelines) txQueueLen(i int) int {
	q := bp.txQueue(i)
	if q == nil {
		return 0
	}
	return len(q)
}

func (bp *batchPipelines) tunQueueLen(i int) int {
	q := bp.tunQueue(i)
	if q == nil {
		return 0
	}
	return len(q)
}

func (bp *batchPipelines) enqueueRx(i int, pkt *overlay.Packet) bool {
	q := bp.rxQueue(i)
	if q == nil {
		return false
	}
	q <- pkt
	return true
}

func (bp *batchPipelines) enqueueTx(i int, pkt *overlay.Packet, addr netip.AddrPort) bool {
	q := bp.txQueue(i)
	if q == nil {
		return false
	}
	q <- queuedDatagram{packet: pkt, addr: addr}
	return true
}

func (bp *batchPipelines) enqueueTun(i int, pkt *overlay.Packet) bool {
	q := bp.tunQueue(i)
	if q == nil {
		return false
	}
	q <- pkt
	return true
}

func (bp *batchPipelines) newPacket() *overlay.Packet {
	if bp == nil || !bp.enabled || bp.pool == nil {
		return nil
	}
	return bp.pool.Get()
}
```
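How the TUN readers, encrypt/decrypt workers, and UDP writers attach to these queues is not part of this file. Below is a minimal sketch of one plausible wiring for a single routine, assuming hypothetical readFromTun and encryptAndSend callbacks; nothing in it is code from this branch.

```go
// Illustrative only: shows how routine i could move packets through the
// batchPipelines queues. readFromTun and encryptAndSend are hypothetical
// callbacks, not functions defined in this changeset.
func runBatchRoutine(bp *batchPipelines, i int,
	readFromTun func(*overlay.Packet),
	encryptAndSend func(*overlay.Packet) netip.AddrPort) {

	// TUN reader: lease a pooled, pre-headroomed buffer and fan it in.
	go func() {
		for {
			pkt := bp.newPacket() // nil when batching is disabled
			if pkt == nil {
				return
			}
			readFromTun(pkt)
			bp.enqueueRx(i, pkt)
		}
	}()

	// Encrypt worker: drain the rx queue and hand datagrams to whatever
	// goroutine services txQueue(i) on the UDP side.
	for pkt := range bp.rxQueue(i) {
		bp.enqueueTx(i, pkt, encryptAndSend(pkt))
	}
}
```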
```diff
@@ -84,16 +84,11 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calcu

 	calculatedRemotes := new(bart.Table[[]*calculatedRemote])

-	rawMap, ok := value.(map[any]any)
+	rawMap, ok := value.(map[string]any)
 	if !ok {
 		return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
 	}
-	for rawKey, rawValue := range rawMap {
-		rawCIDR, ok := rawKey.(string)
-		if !ok {
-			return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
-		}
-
+	for rawCIDR, rawValue := range rawMap {
 		cidr, err := netip.ParsePrefix(rawCIDR)
 		if err != nil {
 			return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
@@ -129,7 +124,7 @@ func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculat
 }

 func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculatedRemote, error) {
-	rawMap, ok := raw.(map[any]any)
+	rawMap, ok := raw.(map[string]any)
 	if !ok {
 		return nil, fmt.Errorf("invalid type: %T", raw)
 	}
```
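The assertion change from map[any]any to map[string]any lines up with how gopkg.in/yaml.v3 decodes mappings: unlike yaml.v2, v3 produces map[string]any for an untyped target, so the per-key string assertion in the old loop is no longer needed. A small standalone illustration of that decoding behavior (not code from this change):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var v any
	// A string-keyed mapping, similar in shape to a calculated_remotes entry.
	doc := "10.0.10.0/24:\n  - mask: 192.168.1.0/24\n    port: 4242\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}

	// yaml.v3 decodes mappings into map[string]any, so the CIDR keys can be
	// ranged over as strings directly, with no per-key type assertion.
	m, ok := v.(map[string]any)
	fmt.Println(ok) // true
	for cidr := range m {
		fmt.Println(cidr) // 10.0.10.0/24
	}
}
```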
```diff
@@ -58,6 +58,9 @@ type Certificate interface {
 	// PublicKey is the raw bytes to be used in asymmetric cryptographic operations.
 	PublicKey() []byte

+	// MarshalPublicKeyPEM is the value of PublicKey marshalled to PEM
+	MarshalPublicKeyPEM() []byte
+
 	// Curve identifies which curve was used for the PublicKey and Signature.
 	Curve() Curve

@@ -135,8 +138,7 @@ func Recombine(v Version, rawCertBytes, publicKey []byte, curve Curve) (Certific
 	case Version2:
 		c, err = unmarshalCertificateV2(rawCertBytes, publicKey, curve)
 	default:
-		//TODO: CERT-V2 make a static var
-		return nil, fmt.Errorf("unknown certificate version %d", v)
+		return nil, ErrUnknownVersion
 	}

 	if err != nil {
```
```diff
@@ -83,6 +83,10 @@ func (c *certificateV1) PublicKey() []byte {
 	return c.details.publicKey
 }

+func (c *certificateV1) MarshalPublicKeyPEM() []byte {
+	return marshalCertPublicKeyToPEM(c)
+}
+
 func (c *certificateV1) Signature() []byte {
 	return c.signature
 }
@@ -110,8 +114,10 @@ func (c *certificateV1) CheckSignature(key []byte) bool {
 	case Curve_CURVE25519:
 		return ed25519.Verify(key, b, c.signature)
 	case Curve_P256:
-		x, y := elliptic.Unmarshal(elliptic.P256(), key)
-		pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
+		pubKey, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), key)
+		if err != nil {
+			return false
+		}
 		hashed := sha256.Sum256(b)
 		return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
 	default:
```
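ecdsa.ParseUncompressedPublicKey is a Go 1.25 addition, which lines up with the workflow bumps to Go 1.25 above; it replaces the deprecated elliptic.Unmarshal construction and reports a malformed point as an explicit error instead of nil coordinates. A compact standalone sketch of the verification path used in CheckSignature:

```go
package example

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/sha256"
)

// verifyP256 checks an ASN.1 ECDSA signature against a 65-byte uncompressed
// P-256 public key, mirroring the CheckSignature change above.
func verifyP256(rawPub, msg, sig []byte) bool {
	pub, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), rawPub)
	if err != nil {
		// elliptic.Unmarshal would have returned nil coordinates here;
		// the new API surfaces the problem as an error instead.
		return false
	}
	digest := sha256.Sum256(msg)
	return ecdsa.VerifyASN1(pub, digest[:], sig)
}
```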
```diff
@@ -1,6 +1,7 @@
 package cert

 import (
+	"crypto/ed25519"
 	"fmt"
 	"net/netip"
 	"testing"
@@ -13,6 +14,7 @@ import (
 )

 func TestCertificateV1_Marshal(t *testing.T) {
+	t.Parallel()
 	before := time.Now().Add(time.Second * -60).Round(time.Second)
 	after := time.Now().Add(time.Second * 60).Round(time.Second)
 	pubKey := []byte("1234567890abcedfghij1234567890ab")
@@ -60,6 +62,58 @@ func TestCertificateV1_Marshal(t *testing.T) {
 	assert.Equal(t, nc.Groups(), nc2.Groups())
 }

+func TestCertificateV1_PublicKeyPem(t *testing.T) {
+	t.Parallel()
+	before := time.Now().Add(time.Second * -60).Round(time.Second)
+	after := time.Now().Add(time.Second * 60).Round(time.Second)
+	pubKey := ed25519.PublicKey("1234567890abcedfghij1234567890ab")
+
+	nc := certificateV1{
+		details: detailsV1{
+			name:           "testing",
+			networks:       []netip.Prefix{},
+			unsafeNetworks: []netip.Prefix{},
+			groups:         []string{"test-group1", "test-group2", "test-group3"},
+			notBefore:      before,
+			notAfter:       after,
+			publicKey:      pubKey,
+			isCA:           false,
+			issuer:         "1234567890abcedfghij1234567890ab",
+		},
+		signature: []byte("1234567890abcedfghij1234567890ab"),
+	}
+
+	assert.Equal(t, Version1, nc.Version())
+	assert.Equal(t, Curve_CURVE25519, nc.Curve())
+	pubPem := "-----BEGIN NEBULA X25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA X25519 PUBLIC KEY-----\n"
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
+	assert.False(t, nc.IsCA())
+
+	nc.details.isCA = true
+	assert.Equal(t, Curve_CURVE25519, nc.Curve())
+	pubPem = "-----BEGIN NEBULA ED25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA ED25519 PUBLIC KEY-----\n"
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
+	assert.True(t, nc.IsCA())
+
+	pubP256KeyPem := []byte(`-----BEGIN NEBULA P256 PUBLIC KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAA=
+-----END NEBULA P256 PUBLIC KEY-----
+`)
+	pubP256Key, _, _, err := UnmarshalPublicKeyFromPEM(pubP256KeyPem)
+	require.NoError(t, err)
+	nc.details.curve = Curve_P256
+	nc.details.publicKey = pubP256Key
+	assert.Equal(t, Curve_P256, nc.Curve())
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
+	assert.True(t, nc.IsCA())
+
+	nc.details.isCA = false
+	assert.Equal(t, Curve_P256, nc.Curve())
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
+	assert.False(t, nc.IsCA())
+}
+
 func TestCertificateV1_Expired(t *testing.T) {
 	nc := certificateV1{
 		details: detailsV1{
```
```diff
@@ -114,6 +114,10 @@ func (c *certificateV2) PublicKey() []byte {
 	return c.publicKey
 }

+func (c *certificateV2) MarshalPublicKeyPEM() []byte {
+	return marshalCertPublicKeyToPEM(c)
+}
+
 func (c *certificateV2) Signature() []byte {
 	return c.signature
 }
@@ -149,8 +153,10 @@ func (c *certificateV2) CheckSignature(key []byte) bool {
 	case Curve_CURVE25519:
 		return ed25519.Verify(key, b, c.signature)
 	case Curve_P256:
-		x, y := elliptic.Unmarshal(elliptic.P256(), key)
-		pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
+		pubKey, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), key)
+		if err != nil {
+			return false
+		}
 		hashed := sha256.Sum256(b)
 		return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
 	default:
```
```diff
@@ -15,6 +15,7 @@ import (
 )

 func TestCertificateV2_Marshal(t *testing.T) {
+	t.Parallel()
 	before := time.Now().Add(time.Second * -60).Round(time.Second)
 	after := time.Now().Add(time.Second * 60).Round(time.Second)
 	pubKey := []byte("1234567890abcedfghij1234567890ab")
@@ -75,6 +76,58 @@ func TestCertificateV2_Marshal(t *testing.T) {
 	assert.Equal(t, nc.Groups(), nc2.Groups())
 }

+func TestCertificateV2_PublicKeyPem(t *testing.T) {
+	t.Parallel()
+	before := time.Now().Add(time.Second * -60).Round(time.Second)
+	after := time.Now().Add(time.Second * 60).Round(time.Second)
+	pubKey := ed25519.PublicKey("1234567890abcedfghij1234567890ab")
+
+	nc := certificateV2{
+		details: detailsV2{
+			name:           "testing",
+			networks:       []netip.Prefix{},
+			unsafeNetworks: []netip.Prefix{},
+			groups:         []string{"test-group1", "test-group2", "test-group3"},
+			notBefore:      before,
+			notAfter:       after,
+			isCA:           false,
+			issuer:         "1234567890abcedfghij1234567890ab",
+		},
+		publicKey: pubKey,
+		signature: []byte("1234567890abcedfghij1234567890ab"),
+	}
+
+	assert.Equal(t, Version2, nc.Version())
+	assert.Equal(t, Curve_CURVE25519, nc.Curve())
+	pubPem := "-----BEGIN NEBULA X25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA X25519 PUBLIC KEY-----\n"
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
+	assert.False(t, nc.IsCA())
+
+	nc.details.isCA = true
+	assert.Equal(t, Curve_CURVE25519, nc.Curve())
+	pubPem = "-----BEGIN NEBULA ED25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA ED25519 PUBLIC KEY-----\n"
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
+	assert.True(t, nc.IsCA())
+
+	pubP256KeyPem := []byte(`-----BEGIN NEBULA P256 PUBLIC KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAA=
+-----END NEBULA P256 PUBLIC KEY-----
+`)
+	pubP256Key, _, _, err := UnmarshalPublicKeyFromPEM(pubP256KeyPem)
+	require.NoError(t, err)
+	nc.curve = Curve_P256
+	nc.publicKey = pubP256Key
+	assert.Equal(t, Curve_P256, nc.Curve())
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
+	assert.True(t, nc.IsCA())
+
+	nc.details.isCA = false
+	assert.Equal(t, Curve_P256, nc.Curve())
+	assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
+	assert.False(t, nc.IsCA())
+}
+
 func TestCertificateV2_Expired(t *testing.T) {
 	nc := certificateV2{
 		details: detailsV2{
```
```diff
@@ -20,6 +20,7 @@ var (
 	ErrPublicPrivateKeyMismatch    = errors.New("public key and private key are not a pair")
 	ErrPrivateKeyEncrypted         = errors.New("private key must be decrypted")
 	ErrCaNotFound                  = errors.New("could not find ca for the certificate")
+	ErrUnknownVersion              = errors.New("certificate version unrecognized")

 	ErrInvalidPEMBlock             = errors.New("input did not contain a valid PEM encoded block")
 	ErrInvalidPEMCertificateBanner = errors.New("bytes did not contain a proper certificate banner")
```
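Promoting the unknown-version failure to a package-level sentinel lets callers branch on it with errors.Is instead of matching the old fmt.Errorf string. A brief illustrative caller (the wrapper function itself is hypothetical; the Recombine signature is the one shown above):

```go
package example

import (
	"errors"
	"fmt"

	"github.com/slackhq/nebula/cert"
)

// recombineOrReject is illustrative: it surfaces an unsupported certificate
// version as a distinct, wrappable failure.
func recombineOrReject(v cert.Version, raw, pub []byte, curve cert.Curve) (cert.Certificate, error) {
	c, err := cert.Recombine(v, raw, pub, curve)
	if errors.Is(err, cert.ErrUnknownVersion) {
		return nil, fmt.Errorf("peer certificate version not supported by this build: %w", err)
	}
	return c, err
}
```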
cert/pem.go (149 changes)

```diff
@@ -1,25 +1,34 @@
 package cert

 import (
+	"encoding/hex"
 	"encoding/pem"
 	"fmt"
+	"time"

 	"golang.org/x/crypto/ed25519"
 )

-const (
+const ( //cert banners
 	CertificateBanner   = "NEBULA CERTIFICATE"
 	CertificateV2Banner = "NEBULA CERTIFICATE V2"
-	X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
-	X25519PublicKeyBanner  = "NEBULA X25519 PUBLIC KEY"
-	EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
-	Ed25519PrivateKeyBanner          = "NEBULA ED25519 PRIVATE KEY"
-	Ed25519PublicKeyBanner           = "NEBULA ED25519 PUBLIC KEY"
+)

-	P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
-	P256PublicKeyBanner  = "NEBULA P256 PUBLIC KEY"
+const ( //key-agreement-key banners
+	X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
+	X25519PublicKeyBanner  = "NEBULA X25519 PUBLIC KEY"
+	P256PrivateKeyBanner   = "NEBULA P256 PRIVATE KEY"
+	P256PublicKeyBanner    = "NEBULA P256 PUBLIC KEY"
+)

+/* including "ECDSA" in the P256 banners is a clue that these keys should be used only for signing */
+const ( //signing key banners
 	EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
 	ECDSAP256PrivateKeyBanner          = "NEBULA ECDSA P256 PRIVATE KEY"
+	ECDSAP256PublicKeyBanner           = "NEBULA ECDSA P256 PUBLIC KEY"
+	EncryptedEd25519PrivateKeyBanner   = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
+	Ed25519PrivateKeyBanner            = "NEBULA ED25519 PRIVATE KEY"
+	Ed25519PublicKeyBanner             = "NEBULA ED25519 PUBLIC KEY"
 )

 // UnmarshalCertificateFromPEM will try to unmarshal the first pem block in a byte array, returning any non consumed
@@ -51,6 +60,16 @@ func UnmarshalCertificateFromPEM(b []byte) (Certificate, []byte, error) {

 }

+func marshalCertPublicKeyToPEM(c Certificate) []byte {
+	if c.IsCA() {
+		return MarshalSigningPublicKeyToPEM(c.Curve(), c.PublicKey())
+	} else {
+		return MarshalPublicKeyToPEM(c.Curve(), c.PublicKey())
+	}
+}
+
+// MarshalPublicKeyToPEM returns a PEM representation of a public key used for ECDH.
+// if your public key came from a certificate, prefer Certificate.PublicKeyPEM() if possible, to avoid mistakes!
 func MarshalPublicKeyToPEM(curve Curve, b []byte) []byte {
 	switch curve {
 	case Curve_CURVE25519:
@@ -62,6 +81,19 @@ func MarshalPublicKeyToPEM(curve Curve, b []byte) []byte {
 	}
 }

+// MarshalSigningPublicKeyToPEM returns a PEM representation of a public key used for signing.
+// if your public key came from a certificate, prefer Certificate.PublicKeyPEM() if possible, to avoid mistakes!
+func MarshalSigningPublicKeyToPEM(curve Curve, b []byte) []byte {
+	switch curve {
+	case Curve_CURVE25519:
+		return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: b})
+	case Curve_P256:
+		return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
+	default:
+		return nil
+	}
+}
+
 func UnmarshalPublicKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
 	k, r := pem.Decode(b)
 	if k == nil {
@@ -73,7 +105,7 @@ func UnmarshalPublicKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
 	case X25519PublicKeyBanner, Ed25519PublicKeyBanner:
 		expectedLen = 32
 		curve = Curve_CURVE25519
-	case P256PublicKeyBanner:
+	case P256PublicKeyBanner, ECDSAP256PublicKeyBanner:
 		// Uncompressed
 		expectedLen = 65
 		curve = Curve_P256
@@ -108,6 +140,101 @@ func MarshalSigningPrivateKeyToPEM(curve Curve, b []byte) []byte {
 	}
 }

+// Backward compatibility functions for older API
+func MarshalX25519PublicKey(b []byte) []byte {
+	return MarshalPublicKeyToPEM(Curve_CURVE25519, b)
+}
+
+func MarshalX25519PrivateKey(b []byte) []byte {
+	return MarshalPrivateKeyToPEM(Curve_CURVE25519, b)
+}
+
+func MarshalPublicKey(curve Curve, b []byte) []byte {
+	return MarshalPublicKeyToPEM(curve, b)
+}
+
+func MarshalPrivateKey(curve Curve, b []byte) []byte {
+	return MarshalPrivateKeyToPEM(curve, b)
+}
+
+// NebulaCertificate is a compatibility wrapper for the old API
+type NebulaCertificate struct {
+	Details   NebulaCertificateDetails
+	Signature []byte
+	cert      Certificate
+}
+
+// NebulaCertificateDetails is a compatibility wrapper for certificate details
+type NebulaCertificateDetails struct {
+	Name      string
+	NotBefore time.Time
+	NotAfter  time.Time
+	PublicKey []byte
+	IsCA      bool
+	Issuer    []byte
+	Curve     Curve
+}
+
+// UnmarshalNebulaCertificateFromPEM provides backward compatibility with the old API
+func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, error) {
+	c, rest, err := UnmarshalCertificateFromPEM(b)
+	if err != nil {
+		return nil, rest, err
+	}
+
+	issuerBytes, err := func() ([]byte, error) {
+		issuer := c.Issuer()
+		if issuer == "" {
+			return nil, nil
+		}
+		decoded, err := hex.DecodeString(issuer)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode issuer fingerprint: %w", err)
+		}
+		return decoded, nil
+	}()
+	if err != nil {
+		return nil, rest, err
+	}
+
+	pubKey := c.PublicKey()
+	if pubKey != nil {
+		pubKey = append([]byte(nil), pubKey...)
+	}
+
+	sig := c.Signature()
+	if sig != nil {
+		sig = append([]byte(nil), sig...)
+	}
+
+	return &NebulaCertificate{
+		Details: NebulaCertificateDetails{
+			Name:      c.Name(),
+			NotBefore: c.NotBefore(),
+			NotAfter:  c.NotAfter(),
+			PublicKey: pubKey,
+			IsCA:      c.IsCA(),
+			Issuer:    issuerBytes,
+			Curve:     c.Curve(),
+		},
+		Signature: sig,
+		cert:      c,
+	}, rest, nil
+}
+
+// IssuerString returns the issuer in hex format for compatibility
+func (n *NebulaCertificate) IssuerString() string {
+	if n.Details.Issuer == nil {
+		return ""
+	}
+	return hex.EncodeToString(n.Details.Issuer)
+}
+
+// Certificate returns the underlying certificate (read-only)
+func (n *NebulaCertificate) Certificate() Certificate {
+	return n.cert
+}
+
 // UnmarshalPrivateKeyFromPEM will try to unmarshal the first pem block in a byte array, returning any non
 // consumed data or an error on failure
 func UnmarshalPrivateKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
```
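The compatibility layer restores the pre-refactor entry points so older callers keep compiling against the new Certificate interface. A short usage sketch (the surrounding function is illustrative; every call it makes is defined in the hunk above):

```go
package example

import (
	"fmt"

	"github.com/slackhq/nebula/cert"
)

// printCertSummary reads a certificate through the restored old-style wrapper.
func printCertSummary(pemBytes []byte) error {
	nc, _, err := cert.UnmarshalNebulaCertificateFromPEM(pemBytes)
	if err != nil {
		return err
	}
	// Old-API fields keep working, and new code can still reach the
	// underlying Certificate through the read-only accessor.
	fmt.Println(nc.Details.Name, nc.IssuerString())
	fmt.Println(nc.Certificate().Version())
	return nil
}
```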
```diff
@@ -177,6 +177,7 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 }

 func TestUnmarshalPublicKeyFromPEM(t *testing.T) {
+	t.Parallel()
 	pubKey := []byte(`# A good key
 -----BEGIN NEBULA ED25519 PUBLIC KEY-----
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
@@ -230,6 +231,7 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 }

 func TestUnmarshalX25519PublicKey(t *testing.T) {
+	t.Parallel()
 	pubKey := []byte(`# A good key
 -----BEGIN NEBULA X25519 PUBLIC KEY-----
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
@@ -240,6 +242,12 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
 AAAAAAAAAAAAAAAAAAAAAAA=
 -----END NEBULA P256 PUBLIC KEY-----
+`)
+	oldPubP256Key := []byte(`# A good key
+-----BEGIN NEBULA ECDSA P256 PUBLIC KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAA=
+-----END NEBULA ECDSA P256 PUBLIC KEY-----
 `)
 	shortKey := []byte(`# A short key
 -----BEGIN NEBULA X25519 PUBLIC KEY-----
@@ -256,15 +264,22 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
 -END NEBULA X25519 PUBLIC KEY-----`)

-	keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)
+	keyBundle := appendByteSlices(pubKey, pubP256Key, oldPubP256Key, shortKey, invalidBanner, invalidPem)

 	// Success test case
 	k, rest, curve, err := UnmarshalPublicKeyFromPEM(keyBundle)
 	assert.Len(t, k, 32)
 	require.NoError(t, err)
-	assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, rest, appendByteSlices(pubP256Key, oldPubP256Key, shortKey, invalidBanner, invalidPem))
 	assert.Equal(t, Curve_CURVE25519, curve)

+	// Success test case
+	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
+	assert.Len(t, k, 65)
+	require.NoError(t, err)
+	assert.Equal(t, rest, appendByteSlices(oldPubP256Key, shortKey, invalidBanner, invalidPem))
+	assert.Equal(t, Curve_P256, curve)
+
 	// Success test case
 	k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
 	assert.Len(t, k, 65)
```
cert/sign.go (12 changes)

```diff
@@ -7,7 +7,6 @@ import (
 	"crypto/rand"
 	"crypto/sha256"
 	"fmt"
-	"math/big"
 	"net/netip"
 	"time"
 )
@@ -55,15 +54,10 @@ func (t *TBSCertificate) Sign(signer Certificate, curve Curve, key []byte) (Cert
 		}
 		return t.SignWith(signer, curve, sp)
 	case Curve_P256:
-		pk := &ecdsa.PrivateKey{
-			PublicKey: ecdsa.PublicKey{
-				Curve: elliptic.P256(),
-			},
-			// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
-			D: new(big.Int).SetBytes(key),
-		}
-		// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
-		pk.X, pk.Y = pk.Curve.ScalarBaseMult(key)
+		pk, err := ecdsa.ParseRawPrivateKey(elliptic.P256(), key)
+		if err != nil {
+			return nil, err
+		}
 		sp := func(certBytes []byte) ([]byte, error) {
 			// We need to hash first for ECDSA
 			// - https://pkg.go.dev/crypto/ecdsa#SignASN1
```
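ecdsa.ParseRawPrivateKey, also new in Go 1.25, does the scalar validation and public-key derivation that the removed big.Int and ScalarBaseMult code performed by hand, and rejects malformed scalars with an error. A compact standalone sketch of signing with it, separate from the TBSCertificate plumbing above:

```go
package example

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
)

// signP256 signs msg with a raw 32-byte P-256 private scalar, the same key
// shape the Sign method above receives.
func signP256(rawKey, msg []byte) ([]byte, error) {
	pk, err := ecdsa.ParseRawPrivateKey(elliptic.P256(), rawKey)
	if err != nil {
		return nil, err // e.g. wrong length or out-of-range scalar
	}
	// ECDSA signs a digest, matching the SignASN1 note kept in the hunk above.
	digest := sha256.Sum256(msg)
	return ecdsa.SignASN1(rand.Reader, pk, digest[:])
}
```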
```diff
@@ -114,6 +114,33 @@ func NewTestCert(v cert.Version, curve cert.Curve, ca cert.Certificate, key []by
 	return c, pub, cert.MarshalPrivateKeyToPEM(curve, priv), pem
 }

+func NewTestCertDifferentVersion(c cert.Certificate, v cert.Version, ca cert.Certificate, key []byte) (cert.Certificate, []byte) {
+	nc := &cert.TBSCertificate{
+		Version:        v,
+		Curve:          c.Curve(),
+		Name:           c.Name(),
+		Networks:       c.Networks(),
+		UnsafeNetworks: c.UnsafeNetworks(),
+		Groups:         c.Groups(),
+		NotBefore:      time.Unix(c.NotBefore().Unix(), 0),
+		NotAfter:       time.Unix(c.NotAfter().Unix(), 0),
+		PublicKey:      c.PublicKey(),
+		IsCA:           false,
+	}
+
+	c, err := nc.Sign(ca, ca.Curve(), key)
+	if err != nil {
+		panic(err)
+	}
+
+	pem, err := c.MarshalPEM()
+	if err != nil {
+		panic(err)
+	}
+
+	return c, pem
+}
+
 func X25519Keypair() ([]byte, []byte) {
 	privkey := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
```
```diff
@@ -11,12 +11,12 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 	"time"

 	"dario.cat/mergo"
 	"github.com/sirupsen/logrus"
-	"github.com/wadey/synctrace"
 	"gopkg.in/yaml.v3"
 )

@@ -27,14 +27,13 @@ type C struct {
 	oldSettings map[string]any
 	callbacks   []func(*C)
 	l           *logrus.Logger
-	reloadLock  synctrace.Mutex
+	reloadLock  sync.Mutex
 }

 func NewC(l *logrus.Logger) *C {
 	return &C{
 		Settings: make(map[string]any),
 		l:        l,
-		reloadLock: synctrace.NewMutex("config-reload"),
 	}
 }

```
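Dropping the reloadLock initializer from NewC is safe because a plain sync.Mutex needs no construction: its zero value is an unlocked, ready-to-use mutex, unlike the named synctrace wrapper it replaces. A tiny illustration:

```go
package example

import "sync"

type guarded struct {
	mu sync.Mutex // zero value is ready to use; no constructor step required
	n  int
}

func (g *guarded) bump() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.n++
}
```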
```diff
@@ -4,14 +4,17 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"fmt"
 	"net/netip"
+	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/cert"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
-	"github.com/wadey/synctrace"
 )

 type trafficDecision int
```
@@ -27,130 +30,124 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type connectionManager struct {
|
type connectionManager struct {
|
||||||
in map[uint32]struct{}
|
|
||||||
inLock synctrace.RWMutex
|
|
||||||
|
|
||||||
out map[uint32]struct{}
|
|
||||||
outLock synctrace.RWMutex
|
|
||||||
|
|
||||||
// relayUsed holds which relay localIndexs are in use
|
// relayUsed holds which relay localIndexs are in use
|
||||||
relayUsed map[uint32]struct{}
|
relayUsed map[uint32]struct{}
|
||||||
relayUsedLock synctrace.RWMutex
|
relayUsedLock *sync.RWMutex
|
||||||
|
|
||||||
hostMap *HostMap
|
hostMap *HostMap
|
||||||
trafficTimer *LockingTimerWheel[uint32]
|
trafficTimer *LockingTimerWheel[uint32]
|
||||||
intf *Interface
|
intf *Interface
|
||||||
pendingDeletion map[uint32]struct{}
|
punchy *Punchy
|
||||||
punchy *Punchy
|
|
||||||
|
// Configuration settings
|
||||||
checkInterval time.Duration
|
checkInterval time.Duration
|
||||||
pendingDeletionInterval time.Duration
|
pendingDeletionInterval time.Duration
|
||||||
metricsTxPunchy metrics.Counter
|
inactivityTimeout atomic.Int64
|
||||||
|
dropInactive atomic.Bool
|
||||||
|
|
||||||
|
metricsTxPunchy metrics.Counter
|
||||||
|
|
||||||
l *logrus.Logger
|
l *logrus.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
|
func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
|
||||||
var max time.Duration
|
cm := &connectionManager{
|
||||||
if checkInterval < pendingDeletionInterval {
|
hostMap: hm,
|
||||||
max = pendingDeletionInterval
|
l: l,
|
||||||
} else {
|
punchy: p,
|
||||||
max = checkInterval
|
relayUsed: make(map[uint32]struct{}),
|
||||||
|
relayUsedLock: &sync.RWMutex{},
|
||||||
|
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
|
||||||
}
|
}
|
||||||
|
|
||||||
nc := &connectionManager{
|
cm.reload(c, true)
|
||||||
hostMap: intf.hostMap,
|
c.RegisterReloadCallback(func(c *config.C) {
|
||||||
in: make(map[uint32]struct{}),
|
cm.reload(c, false)
|
||||||
inLock: synctrace.NewRWMutex("connection-manager-in"),
|
})
|
||||||
out: make(map[uint32]struct{}),
|
|
||||||
outLock: synctrace.NewRWMutex("connection-manager-out"),
|
|
||||||
relayUsed: make(map[uint32]struct{}),
|
|
||||||
relayUsedLock: synctrace.NewRWMutex("connection-manager-relay-used"),
|
|
||||||
trafficTimer: NewLockingTimerWheel[uint32]("traffic-timer", time.Millisecond*500, max),
|
|
||||||
intf: intf,
|
|
||||||
pendingDeletion: make(map[uint32]struct{}),
|
|
||||||
checkInterval: checkInterval,
|
|
||||||
pendingDeletionInterval: pendingDeletionInterval,
|
|
||||||
punchy: punchy,
|
|
||||||
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
|
|
||||||
l: l,
|
|
||||||
}
|
|
||||||
|
|
||||||
nc.Start(ctx)
|
return cm
|
||||||
return nc
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) In(localIndex uint32) {
|
func (cm *connectionManager) reload(c *config.C, initial bool) {
|
||||||
n.inLock.RLock()
|
if initial {
|
||||||
// If this already exists, return
|
cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
|
||||||
if _, ok := n.in[localIndex]; ok {
|
cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
|
||||||
n.inLock.RUnlock()
|
|
||||||
return
|
// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
|
||||||
|
// pretty close to their configured duration.
|
||||||
|
// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
|
||||||
|
minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
|
||||||
|
maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
|
||||||
|
cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
|
||||||
|
}
|
||||||
|
|
||||||
|
if initial || c.HasChanged("tunnels.inactivity_timeout") {
|
||||||
|
old := cm.getInactivityTimeout()
|
||||||
|
cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
|
||||||
|
if !initial {
|
||||||
|
cm.l.WithField("oldDuration", old).
|
||||||
|
WithField("newDuration", cm.getInactivityTimeout()).
|
||||||
|
Info("Inactivity timeout has changed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if initial || c.HasChanged("tunnels.drop_inactive") {
|
||||||
|
old := cm.dropInactive.Load()
|
||||||
|
cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
|
||||||
|
if !initial {
|
||||||
|
cm.l.WithField("oldBool", old).
|
||||||
|
WithField("newBool", cm.dropInactive.Load()).
|
||||||
|
Info("Drop inactive setting has changed")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
n.inLock.RUnlock()
|
|
||||||
n.inLock.Lock()
|
|
||||||
n.in[localIndex] = struct{}{}
|
|
||||||
n.inLock.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Out(localIndex uint32) {
|
func (cm *connectionManager) getInactivityTimeout() time.Duration {
|
||||||
n.outLock.RLock()
|
return (time.Duration)(cm.inactivityTimeout.Load())
|
||||||
// If this already exists, return
|
|
||||||
if _, ok := n.out[localIndex]; ok {
|
|
||||||
n.outLock.RUnlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n.outLock.RUnlock()
|
|
||||||
n.outLock.Lock()
|
|
||||||
n.out[localIndex] = struct{}{}
|
|
||||||
n.outLock.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) RelayUsed(localIndex uint32) {
|
func (cm *connectionManager) In(h *HostInfo) {
|
||||||
n.relayUsedLock.RLock()
|
h.in.Store(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cm *connectionManager) Out(h *HostInfo) {
|
||||||
|
h.out.Store(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cm *connectionManager) RelayUsed(localIndex uint32) {
|
||||||
|
cm.relayUsedLock.RLock()
|
||||||
// If this already exists, return
|
// If this already exists, return
|
||||||
if _, ok := n.relayUsed[localIndex]; ok {
|
if _, ok := cm.relayUsed[localIndex]; ok {
|
||||||
n.relayUsedLock.RUnlock()
|
cm.relayUsedLock.RUnlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n.relayUsedLock.RUnlock()
|
cm.relayUsedLock.RUnlock()
|
||||||
n.relayUsedLock.Lock()
|
cm.relayUsedLock.Lock()
|
||||||
n.relayUsed[localIndex] = struct{}{}
|
cm.relayUsed[localIndex] = struct{}{}
|
||||||
n.relayUsedLock.Unlock()
|
cm.relayUsedLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
|
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
|
||||||
// resets the state for this local index
|
// resets the state for this local index
|
||||||
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
|
func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
|
||||||
n.inLock.Lock()
|
in := h.in.Swap(false)
|
||||||
n.outLock.Lock()
|
out := h.out.Swap(false)
|
||||||
_, in := n.in[localIndex]
|
if in || out {
|
||||||
_, out := n.out[localIndex]
|
h.lastUsed = now
|
||||||
delete(n.in, localIndex)
|
}
|
||||||
delete(n.out, localIndex)
|
|
||||||
n.inLock.Unlock()
|
|
||||||
n.outLock.Unlock()
|
|
||||||
return in, out
|
return in, out
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
|
// AddTrafficWatch must be called for every new HostInfo.
|
||||||
// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
|
// We will continue to monitor the HostInfo until the tunnel is dropped.
|
||||||
n.outLock.Lock()
|
func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
|
||||||
if _, ok := n.out[localIndex]; ok {
|
if h.out.Swap(true) == false {
|
||||||
n.outLock.Unlock()
|
cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
n.out[localIndex] = struct{}{}
|
|
||||||
n.trafficTimer.Add(localIndex, n.checkInterval)
|
|
||||||
n.outLock.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) Start(ctx context.Context) {
|
func (cm *connectionManager) Start(ctx context.Context) {
|
||||||
go n.Run(ctx)
|
clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
|
||||||
}
|
|
||||||
|
|
||||||
func (n *connectionManager) Run(ctx context.Context) {
|
|
||||||
//TODO: this tick should be based on the min wheel tick? Check firewall
|
|
||||||
clockSource := time.NewTicker(500 * time.Millisecond)
|
|
||||||
defer clockSource.Stop()
|
defer clockSource.Stop()
|
||||||
|
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
@@ -163,61 +160,61 @@ func (n *connectionManager) Run(ctx context.Context) {
|
|||||||
return
|
return
|
||||||
|
|
||||||
case now := <-clockSource.C:
|
case now := <-clockSource.C:
|
||||||
n.trafficTimer.Advance(now)
|
cm.trafficTimer.Advance(now)
|
||||||
for {
|
for {
|
||||||
localIndex, has := n.trafficTimer.Purge()
|
localIndex, has := cm.trafficTimer.Purge()
|
||||||
if !has {
|
if !has {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
n.doTrafficCheck(localIndex, p, nb, out, now)
|
cm.doTrafficCheck(localIndex, p, nb, out, now)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
||||||
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
|
decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)
|
||||||
|
|
||||||
switch decision {
|
switch decision {
|
||||||
case deleteTunnel:
|
case deleteTunnel:
|
||||||
if n.hostMap.DeleteHostInfo(hostinfo) {
|
if cm.hostMap.DeleteHostInfo(hostinfo) {
|
||||||
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
|
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
|
||||||
n.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
|
cm.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
|
||||||
}
|
}
|
||||||
|
|
||||||
case closeTunnel:
|
case closeTunnel:
|
||||||
n.intf.sendCloseTunnel(hostinfo)
|
cm.intf.sendCloseTunnel(hostinfo)
|
||||||
n.intf.closeTunnel(hostinfo)
|
cm.intf.closeTunnel(hostinfo)
|
||||||
|
|
||||||
case swapPrimary:
|
case swapPrimary:
|
||||||
n.swapPrimary(hostinfo, primary)
|
cm.swapPrimary(hostinfo, primary)
|
||||||
|
|
||||||
case migrateRelays:
|
case migrateRelays:
|
||||||
n.migrateRelayUsed(hostinfo, primary)
|
cm.migrateRelayUsed(hostinfo, primary)
|
||||||
|
|
||||||
case tryRehandshake:
|
case tryRehandshake:
|
||||||
n.tryRehandshake(hostinfo)
|
cm.tryRehandshake(hostinfo)
|
||||||
|
|
||||||
case sendTestPacket:
|
case sendTestPacket:
|
||||||
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
n.resetRelayTrafficCheck(hostinfo)
|
cm.resetRelayTrafficCheck(hostinfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
    if hostinfo != nil {
        cm.relayUsedLock.Lock()
        defer cm.relayUsedLock.Unlock()
        // No need to migrate any relays, delete usage info now.
        for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
            delete(cm.relayUsed, idx)
        }
    }
}

func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
    relayFor := oldhostinfo.relayState.CopyAllRelayFor()

    for _, r := range relayFor {
@@ -227,46 +224,51 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
        var relayFrom netip.Addr
        var relayTo netip.Addr
        switch {
        case ok:
            switch existing.State {
            case Established, PeerRequested, Disestablished:
                // This relay already exists in newhostinfo, then do nothing.
                continue
            case Requested:
                // The relay exists in a Requested state; re-send the request
                index = existing.LocalIndex
                switch r.Type {
                case TerminalType:
                    relayFrom = cm.intf.myVpnAddrs[0]
                    relayTo = existing.PeerAddr
                case ForwardingType:
                    relayFrom = existing.PeerAddr
                    relayTo = newhostinfo.vpnAddrs[0]
                default:
                    // should never happen
                    panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
                }
            }
        case !ok:
            cm.relayUsedLock.RLock()
            if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
                // The relay hasn't been used; don't migrate it.
                cm.relayUsedLock.RUnlock()
                continue
            }
            cm.relayUsedLock.RUnlock()
            // The relay doesn't exist at all; create some relay state and send the request.
            var err error
            index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerAddr, nil, r.Type, Requested)
            if err != nil {
                cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
                continue
            }
            switch r.Type {
            case TerminalType:
                relayFrom = cm.intf.myVpnAddrs[0]
                relayTo = r.PeerAddr
            case ForwardingType:
                relayFrom = r.PeerAddr
                relayTo = newhostinfo.vpnAddrs[0]
            default:
                // should never happen
                panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
            }
        }

@@ -279,12 +281,12 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
        switch newhostinfo.GetCert().Certificate.Version() {
        case cert.Version1:
            if !relayFrom.Is4() {
                cm.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
                continue
            }

            if !relayTo.Is4() {
                cm.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
                continue
            }

@@ -296,16 +298,16 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
            req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
            req.RelayToAddr = netAddrToProtoAddr(relayTo)
        default:
            newhostinfo.logger(cm.l).Error("Unknown certificate version found while attempting to migrate relay")
            continue
        }

        msg, err := req.Marshal()
        if err != nil {
            cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
        } else {
            cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
            cm.l.WithFields(logrus.Fields{
                "relayFrom":           req.RelayFromAddr,
                "relayTo":             req.RelayToAddr,
                "initiatorRelayIndex": req.InitiatorRelayIndex,

@@ -316,46 +318,44 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
    }
}

func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
    // Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
    cm.hostMap.RLock()
    defer cm.hostMap.RUnlock()

    hostinfo := cm.hostMap.Indexes[localIndex]
    if hostinfo == nil {
        cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
        return doNothing, nil, nil
    }

    if cm.isInvalidCertificate(now, hostinfo) {
        return closeTunnel, hostinfo, nil
    }

    primary := cm.hostMap.Hosts[hostinfo.vpnAddrs[0]]
    mainHostInfo := true
    if primary != nil && primary != hostinfo {
        mainHostInfo = false
    }

    // Check for traffic on this hostinfo
    inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)

    // A hostinfo is determined alive if there is incoming traffic
    if inTraffic {
        decision := doNothing
        if cm.l.Level >= logrus.DebugLevel {
            hostinfo.logger(cm.l).
                WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
                Debug("Tunnel status")
        }
        hostinfo.pendingDeletion.Store(false)

        if mainHostInfo {
            decision = tryRehandshake

        } else {
            if cm.shouldSwapPrimary(hostinfo) {
                decision = swapPrimary
            } else {
                // migrate the relays to the primary, if in use.
@@ -363,46 +363,55 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
            }
        }

        cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)

        if !outTraffic {
            // Send a punch packet to keep the NAT state alive
            cm.sendPunch(hostinfo)
        }

        return decision, hostinfo, primary
    }

    if hostinfo.pendingDeletion.Load() {
        // We have already sent a test packet and nothing was returned, this hostinfo is dead
        hostinfo.logger(cm.l).
            WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
            Info("Tunnel status")

        return deleteTunnel, hostinfo, nil
    }

    decision := doNothing
    if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
        if !outTraffic {
            inactiveFor, isInactive := cm.isInactive(hostinfo, now)
            if isInactive {
                // Tunnel is inactive, tear it down
                hostinfo.logger(cm.l).
                    WithField("inactiveDuration", inactiveFor).
                    WithField("primary", mainHostInfo).
                    Info("Dropping tunnel due to inactivity")

                return closeTunnel, hostinfo, primary
            }

            // If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
            // Just maintain NAT state if configured to do so.
            cm.sendPunch(hostinfo)
            cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
            return doNothing, nil, nil
        }

        if cm.punchy.GetTargetEverything() {
            // This is similar to the old punchy behavior with a slight optimization.
            // We aren't receiving traffic but we are sending it, punch on all known
            // ips in case we need to re-prime NAT state
            cm.sendPunch(hostinfo)
        }

        if cm.l.Level >= logrus.DebugLevel {
            hostinfo.logger(cm.l).
                WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
                Debug("Tunnel status")
        }

@@ -411,17 +420,33 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
        decision = sendTestPacket

    } else {
        if cm.l.Level >= logrus.DebugLevel {
            hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
        }
    }

    hostinfo.pendingDeletion.Store(true)
    cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
    return decision, hostinfo, nil
}

func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
    if cm.dropInactive.Load() == false {
        // We aren't configured to drop inactive tunnels
        return 0, false
    }

    inactiveDuration := now.Sub(hostinfo.lastUsed)
    if inactiveDuration < cm.getInactivityTimeout() {
        // It's not considered inactive
        return inactiveDuration, false
    }

    // The tunnel is inactive
    return inactiveDuration, true
}
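The inactivity check above is driven by configuration. Judging from the e2e test added later in this diff, the relevant keys live under a tunnels section; a minimal override in the m{} style used by the e2e helpers would be (values are examples only):

// Example values; key names are taken from e2e/tunnels_test.go below.
overrides := m{
    "tunnels": m{
        "drop_inactive":      true,
        "inactivity_timeout": "5s",
    },
}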
func (cm *connectionManager) shouldSwapPrimary(current *HostInfo) bool {
    // The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
    // If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
    // Let's sort this out.
@@ -429,83 +454,127 @@ func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
    // Only one side should swap because if both swap then we may never resolve to a single tunnel.
    // vpn addr is static across all tunnels for this host pair so lets
    // use that to determine if we should consider swapping.
    if current.vpnAddrs[0].Compare(cm.intf.myVpnAddrs[0]) < 0 {
        // Their primary vpn addr is less than mine. Do not swap.
        return false
    }

    crt := cm.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
    if crt == nil {
        //my cert was reloaded away. We should definitely swap from this tunnel
        return true
    }
    // If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
    // settle down.
    return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
}

func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
    cm.hostMap.Lock()
    // Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
    if cm.hostMap.Hosts[current.vpnAddrs[0]] == primary {
        cm.hostMap.unlockedMakePrimary(current)
    }
    cm.hostMap.Unlock()
}

// isInvalidCertificate decides if we should destroy a tunnel.
// returns true if pki.disconnect_invalid is true and the certificate is no longer valid.
// Blocklisted certificates will skip the pki.disconnect_invalid check and return true.
func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
    remoteCert := hostinfo.GetCert()
    if remoteCert == nil {
        return false //don't tear down tunnels for handshakes in progress
    }

    caPool := cm.intf.pki.GetCAPool()
    err := caPool.VerifyCachedCertificate(now, remoteCert)
    if err == nil {
        return false //cert is still valid! yay!
    } else if err == cert.ErrBlockListed { //avoiding errors.Is for speed
        // Block listed certificates should always be disconnected
        hostinfo.logger(cm.l).WithError(err).
            WithField("fingerprint", remoteCert.Fingerprint).
            Info("Remote certificate is blocked, tearing down the tunnel")
        return true
    } else if cm.intf.disconnectInvalid.Load() {
        hostinfo.logger(cm.l).WithError(err).
            WithField("fingerprint", remoteCert.Fingerprint).
            Info("Remote certificate is no longer valid, tearing down the tunnel")
        return true
    } else {
        //if we reach here, the cert is no longer valid, but we're configured to keep tunnels from now-invalid certs open
        return false
    }
}
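As the doc comment above notes, tearing down tunnels with now-invalid certificates is opt-in via pki.disconnect_invalid, while blocklisted certificates are always disconnected. A config sketch in the same m{} style the e2e helpers use (key name taken from the comment, value an example):

// Sketch only: enables disconnect-on-invalid alongside the usual pki material.
pkiOverrides := m{
    "pki": m{
        "disconnect_invalid": true,
    },
}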
func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
    if !cm.punchy.GetPunch() {
        // Punching is disabled
        return
    }

    if cm.intf.lightHouse.IsAnyLighthouseAddr(hostinfo.vpnAddrs) {
        // Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
        // In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
        // would lose the ability to notify us and punchy.respond would become unreliable.
        return
    }

    if cm.punchy.GetTargetEverything() {
        hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
            cm.metricsTxPunchy.Inc(1)
            cm.intf.outside.WriteTo([]byte{1}, addr)
        })

    } else if hostinfo.remote.IsValid() {
        cm.metricsTxPunchy.Inc(1)
        cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
    }
}

func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
    cs := cm.intf.pki.getCertState()
    curCrt := hostinfo.ConnectionState.myCert
    curCrtVersion := curCrt.Version()
    myCrt := cs.getCertificate(curCrtVersion)
    if myCrt == nil {
        cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
            WithField("version", curCrtVersion).
            WithField("reason", "local certificate removed").
            Info("Re-handshaking with remote")
        cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
        return
    }
    peerCrt := hostinfo.ConnectionState.peerCert
    if peerCrt != nil && curCrtVersion < peerCrt.Certificate.Version() {
        // if our certificate version is less than theirs, and we have a matching version available, rehandshake?
        if cs.getCertificate(peerCrt.Certificate.Version()) != nil {
            cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
                WithField("version", curCrtVersion).
                WithField("peerVersion", peerCrt.Certificate.Version()).
                WithField("reason", "local certificate version lower than peer, attempting to correct").
                Info("Re-handshaking with remote")
            cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], func(hh *HandshakeHostInfo) {
                hh.initiatingVersionOverride = peerCrt.Certificate.Version()
            })
            return
        }
    }
    if !bytes.Equal(curCrt.Signature(), myCrt.Signature()) {
        cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
            WithField("reason", "local certificate is not current").
            Info("Re-handshaking with remote")

        cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
        return
    }
    if curCrtVersion < cs.initiatingVersion {
        cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
            WithField("reason", "current cert version < pki.initiatingVersion").
            Info("Re-handshaking with remote")

        cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
        return
    }
}
@@ -1,7 +1,6 @@
package nebula

import (
    "crypto/ed25519"
    "crypto/rand"
    "net/netip"
@@ -23,7 +22,7 @@ func newTestLighthouse() *LightHouse {
        addrMap:   map[netip.Addr]*RemoteList{},
        queryChan: make(chan netip.Addr, 10),
    }
    lighthouses := []netip.Addr{}
    staticList := map[netip.Addr]struct{}{}

    lh.lighthouses.Store(&lighthouses)
@@ -64,10 +63,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
    ifce.pki.cs.Store(cs)

    // Create manager
    conf := config.NewC(l)
    punchy := NewPunchyFromConfig(l, conf)
    nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
    nc.intf = ifce
    p := []byte("")
    nb := make([]byte, 12, 12)
    out := make([]byte, mtu)
@@ -85,32 +84,33 @@ func Test_NewConnectionManagerTest(t *testing.T) {
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

    // We saw traffic out to vpnIp
    nc.Out(hostinfo)
    nc.In(hostinfo)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.True(t, hostinfo.out.Load())
    assert.True(t, hostinfo.in.Load())

    // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    // Do another traffic check tick, this host should be pending deletion now
    nc.Out(hostinfo)
    assert.True(t, hostinfo.out.Load())
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.True(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])

    // Do a final traffic check tick, the host should now be removed
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs)
    assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
}

@@ -146,10 +146,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
    ifce.pki.cs.Store(cs)

    // Create manager
    conf := config.NewC(l)
    punchy := NewPunchyFromConfig(l, conf)
    nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
    nc.intf = ifce
    p := []byte("")
    nb := make([]byte, 12, 12)
    out := make([]byte, mtu)
@@ -167,33 +167,129 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

    // We saw traffic out to vpnIp
    nc.Out(hostinfo)
    nc.In(hostinfo)
    assert.True(t, hostinfo.in.Load())
    assert.True(t, hostinfo.out.Load())
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

    // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    // Do another traffic check tick, this host should be pending deletion now
    nc.Out(hostinfo)
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.True(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])

    // We saw traffic, should no longer be pending deletion
    nc.In(hostinfo)
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
}

func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
    l := test.NewLogger()
    localrange := netip.MustParsePrefix("10.1.1.1/24")
    vpnAddrs := []netip.Addr{netip.MustParseAddr("172.1.1.2")}
    preferredRanges := []netip.Prefix{localrange}

    // Very incomplete mock objects
    hostMap := newHostMap(l)
    hostMap.preferredRanges.Store(&preferredRanges)

    cs := &CertState{
        initiatingVersion: cert.Version1,
        privateKey:        []byte{},
        v1Cert:            &dummyCert{version: cert.Version1},
        v1HandshakeBytes:  []byte{},
    }

    lh := newTestLighthouse()
    ifce := &Interface{
        hostMap:          hostMap,
        inside:           &test.NoopTun{},
        outside:          &udp.NoopConn{},
        firewall:         &Firewall{},
        lightHouse:       lh,
        pki:              &PKI{},
        handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
        l:                l,
    }
    ifce.pki.cs.Store(cs)

    // Create manager
    conf := config.NewC(l)
    conf.Settings["tunnels"] = map[string]any{
        "drop_inactive": true,
    }
    punchy := NewPunchyFromConfig(l, conf)
    nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
    assert.True(t, nc.dropInactive.Load())
    nc.intf = ifce

    // Add an ip we have established a connection w/ to hostmap
    hostinfo := &HostInfo{
        vpnAddrs:      vpnAddrs,
        localIndexId:  1099,
        remoteIndexId: 9901,
    }
    hostinfo.ConnectionState = &ConnectionState{
        myCert: &dummyCert{version: cert.Version1},
        H:      &noise.HandshakeState{},
    }
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

    // Do a traffic check tick, in and out should be cleared but should not be pending deletion
    nc.Out(hostinfo)
    nc.In(hostinfo)
    assert.True(t, hostinfo.out.Load())
    assert.True(t, hostinfo.in.Load())

    now := time.Now()
    decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
    assert.Equal(t, tryRehandshake, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
    assert.Equal(t, doNothing, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    // Do another traffic check tick, should still not be pending deletion
    decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
    assert.Equal(t, doNothing, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])

    // Finally advance beyond the inactivity timeout
    decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
    assert.Equal(t, closeTunnel, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
}
@@ -264,10 +360,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
    ifce.disconnectInvalid.Store(true)

    // Create manager
    conf := config.NewC(l)
    punchy := NewPunchyFromConfig(l, conf)
    nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
    nc.intf = ifce
    ifce.connectionManager = nc

    hostinfo := &HostInfo{
@@ -350,6 +446,10 @@ func (d *dummyCert) PublicKey() []byte {
    return d.publicKey
}

func (d *dummyCert) MarshalPublicKeyPEM() []byte {
    return cert.MarshalPublicKeyToPEM(d.curve, d.publicKey)
}

func (d *dummyCert) Signature() []byte {
    return d.signature
}
@@ -4,13 +4,13 @@ import (
    "crypto/rand"
    "encoding/json"
    "fmt"
    "sync"
    "sync/atomic"

    "github.com/flynn/noise"
    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/cert"
    "github.com/slackhq/nebula/noiseutil"
)

const ReplayWindow = 1024
@@ -24,7 +24,7 @@ type ConnectionState struct {
    initiator      bool
    messageCounter atomic.Uint64
    window         *Bits
    writeLock      sync.Mutex
}

func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, initiator bool, pattern noise.HandshakePattern) (*ConnectionState, error) {
@@ -76,7 +76,6 @@ func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, i
        initiator: initiator,
        window:    b,
        myCert:    crt,
    }
    // always start the counter from 2, as packet 1 and packet 2 are handshake packets.
    ci.messageCounter.Add(2)
control.go
@@ -26,14 +26,15 @@ type controlHostLister interface {
}

type Control struct {
    f                      *Interface
    l                      *logrus.Logger
    ctx                    context.Context
    cancel                 context.CancelFunc
    sshStart               func()
    statsStart             func()
    dnsStart               func()
    lighthouseStart        func()
    connectionManagerStart func(context.Context)
}

type ControlHostInfo struct {
@@ -63,6 +64,9 @@ func (c *Control) Start() {
    if c.dnsStart != nil {
        go c.dnsStart()
    }
    if c.connectionManagerStart != nil {
        go c.connectionManagerStart(c.ctx)
    }
    if c.lighthouseStart != nil {
        c.lighthouseStart()
    }
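With connectionManagerStart wired into Control.Start, the connection manager's main loop only runs once Start is called, and it inherits the Control context for shutdown. A minimal usage sketch in the shape of the e2e helpers later in this diff (the config c, logger l and the "e2e-test" build string are placeholders):

// Sketch based on the e2e helper usage below; error handling trimmed.
ctrl, err := nebula.Main(c, false, "e2e-test", l, nil)
if err != nil {
    panic(err)
}
ctrl.Start() // also launches the connection manager goroutine with ctrl's context
// ... exercise tunnels ...
ctrl.Stop()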
@@ -53,7 +53,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
        localIndexId: 201,
        vpnAddrs:     []netip.Addr{vpnIp},
        relayState: RelayState{
            relays:         nil,
            relayForByAddr: map[netip.Addr]*Relay{},
            relayForByIdx:  map[uint32]*Relay{},
        },
@@ -72,7 +72,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
        localIndexId: 201,
        vpnAddrs:     []netip.Addr{vpnIp2},
        relayState: RelayState{
            relays:         nil,
            relayForByAddr: map[netip.Addr]*Relay{},
            relayForByIdx:  map[uint32]*Relay{},
        },
@@ -6,12 +6,12 @@ import (
    "net/netip"
    "strconv"
    "strings"
    "sync"

    "github.com/gaissmai/bart"
    "github.com/miekg/dns"
    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/config"
)

// This whole thing should be rewritten to use context
@@ -21,7 +21,7 @@ var dnsServer *dns.Server
var dnsAddr string

type dnsRecords struct {
    sync.RWMutex
    l       *logrus.Logger
    dnsMap4 map[string]netip.Addr
    dnsMap6 map[string]netip.Addr
@@ -31,7 +31,6 @@ type dnsRecords struct {

func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords {
    return &dnsRecords{
        l:       l,
        dnsMap4: make(map[string]netip.Addr),
        dnsMap6: make(map[string]netip.Addr),
@@ -506,7 +506,7 @@ func TestReestablishRelays(t *testing.T) {
    curIndexes := len(myControl.GetHostmap().Indexes)
    for curIndexes >= start {
        curIndexes = len(myControl.GetHostmap().Indexes)
        r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
        myControl.InjectTunUDPPacket(theirVpnIpNet[0].Addr(), 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me should fail"))

        r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
@@ -1052,6 +1052,9 @@ func TestRehandshakingLoser(t *testing.T) {
    t.Log("Stand up a tunnel between me and them")
    assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)

    myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
    theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)

    r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

    r.Log("Renew their certificate and spin until mine sees it")
@@ -129,6 +129,109 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
    return control, vpnNetworks, udpAddr, c
}

// newServer creates a nebula instance with fewer assumptions
func newServer(caCrt []cert.Certificate, certs []cert.Certificate, key []byte, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
    l := NewTestLogger()

    vpnNetworks := certs[len(certs)-1].Networks()

    var udpAddr netip.AddrPort
    if vpnNetworks[0].Addr().Is4() {
        budpIp := vpnNetworks[0].Addr().As4()
        budpIp[1] -= 128
        udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
    } else {
        budpIp := vpnNetworks[0].Addr().As16()
        // beef for funsies
        budpIp[2] = 190
        budpIp[3] = 239
        udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
    }

    caStr := ""
    for _, ca := range caCrt {
        x, err := ca.MarshalPEM()
        if err != nil {
            panic(err)
        }
        caStr += string(x)
    }
    certStr := ""
    for _, c := range certs {
        x, err := c.MarshalPEM()
        if err != nil {
            panic(err)
        }
        certStr += string(x)
    }

    mc := m{
        "pki": m{
            "ca":   caStr,
            "cert": certStr,
            "key":  string(key),
        },
        //"tun": m{"disabled": true},
        "firewall": m{
            "outbound": []m{{
                "proto": "any",
                "port":  "any",
                "host":  "any",
            }},
            "inbound": []m{{
                "proto": "any",
                "port":  "any",
                "host":  "any",
            }},
        },
        //"handshakes": m{
        //	"try_interval": "1s",
        //},
        "listen": m{
            "host": udpAddr.Addr().String(),
            "port": udpAddr.Port(),
        },
        "logging": m{
            "timestamp_format": fmt.Sprintf("%v 15:04:05.000000", certs[0].Name()),
            "level":            l.Level.String(),
        },
        "timers": m{
            "pending_deletion_interval": 2,
            "connection_alive_interval": 2,
        },
    }

    if overrides != nil {
        final := m{}
        err := mergo.Merge(&final, overrides, mergo.WithAppendSlice)
        if err != nil {
            panic(err)
        }
        err = mergo.Merge(&final, mc, mergo.WithAppendSlice)
        if err != nil {
            panic(err)
        }
        mc = final
    }

    cb, err := yaml.Marshal(mc)
    if err != nil {
        panic(err)
    }

    c := config.NewC(l)
    cStr := string(cb)
    c.LoadString(cStr)

    control, err := nebula.Main(c, false, "e2e-test", l, nil)

    if err != nil {
        panic(err)
    }

    return control, vpnNetworks, udpAddr, c
}
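newServer is exercised by the new tunnels tests below: callers pass the CA bundle, the host's certificate chain (the last certificate decides vpnNetworks), the private key, and an optional override map that is merged over the generated config. A usage sketch matching the pattern in TestCertUpgrade, with an example tunnels override:

// Usage sketch; ca, ca2, myCert and myPrivKey come from the cert_test helpers as in the tests below.
myControl, myVpnIpNet, myUdpAddr, myC := newServer(
    []cert.Certificate{ca, ca2},
    []cert.Certificate{myCert},
    myPrivKey,
    m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}},
)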
type doneCb func()

func deadline(t *testing.T, seconds time.Duration) doneCb {
@@ -700,6 +700,7 @@ func (r *R) FlushAll() {
            r.Unlock()
            panic("Can't FlushAll for host: " + p.To.String())
        }
        receiver.InjectUDPPacket(p)
        r.Unlock()
    }
}
|
|||||||
320
e2e/tunnels_test.go
Normal file
320
e2e/tunnels_test.go
Normal file
@@ -0,0 +1,320 @@
|
|||||||
|
//go:build e2e_testing
|
||||||
|
// +build e2e_testing
|
||||||
|
|
||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/netip"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/slackhq/nebula/cert"
|
||||||
|
"github.com/slackhq/nebula/cert_test"
|
||||||
|
"github.com/slackhq/nebula/e2e/router"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"gopkg.in/yaml.v3"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDropInactiveTunnels(t *testing.T) {
|
||||||
|
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
|
||||||
|
// under ideal conditions
|
||||||
|
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
|
||||||
|
|
||||||
|
// Share our underlay information
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel between me and them works")
|
||||||
|
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||||
|
|
||||||
|
r.Log("Go inactive and wait for the tunnels to get dropped")
|
||||||
|
waitStart := time.Now()
|
||||||
|
for {
|
||||||
|
myIndexes := len(myControl.GetHostmap().Indexes)
|
||||||
|
theirIndexes := len(theirControl.GetHostmap().Indexes)
|
||||||
|
if myIndexes == 0 && theirIndexes == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
since := time.Since(waitStart)
|
||||||
|
r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
|
||||||
|
if since > time.Second*30 {
|
||||||
|
t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
r.FlushAll()
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCertUpgrade(t *testing.T) {
|
||||||
|
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
|
||||||
|
// under ideal conditions
|
||||||
|
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||||
|
caB, err := ca.MarshalPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
ca2, _, caKey2, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||||
|
|
||||||
|
ca2B, err := ca2.MarshalPEM()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
caStr := fmt.Sprintf("%s\n%s", caB, ca2B)
|
||||||
|
|
||||||
|
myCert, _, myPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.1/24")}, nil, []string{})
|
||||||
|
_, myCert2Pem := cert_test.NewTestCertDifferentVersion(myCert, cert.Version2, ca2, caKey2)
|
||||||
|
|
||||||
|
theirCert, _, theirPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.2/24")}, nil, []string{})
|
||||||
|
theirCert2, _ := cert_test.NewTestCertDifferentVersion(theirCert, cert.Version2, ca2, caKey2)
|
||||||
|
|
||||||
|
myControl, myVpnIpNet, myUdpAddr, myC := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{myCert}, myPrivKey, m{})
|
||||||
|
theirControl, theirVpnIpNet, theirUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{theirCert, theirCert2}, theirPrivKey, m{})
|
||||||
|
|
||||||
|
// Share our underlay information
|
||||||
|
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||||
|
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
||||||
|
|
||||||
|
// Start the servers
|
||||||
|
myControl.Start()
|
||||||
|
theirControl.Start()
|
||||||
|
|
||||||
|
r := router.NewR(t, myControl, theirControl)
|
||||||
|
defer r.RenderFlow()
|
||||||
|
|
||||||
|
r.Log("Assert the tunnel between me and them works")
|
||||||
|
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||||
|
r.Log("yay")
|
||||||
|
//todo ???
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
r.FlushAll()
|
||||||
|
|
||||||
|
mc := m{
|
||||||
|
"pki": m{
|
||||||
|
"ca": caStr,
|
||||||
|
"cert": string(myCert2Pem),
|
||||||
|
"key": string(myPrivKey),
|
||||||
|
},
|
||||||
|
//"tun": m{"disabled": true},
|
||||||
|
"firewall": myC.Settings["firewall"],
|
||||||
|
//"handshakes": m{
|
||||||
|
// "try_interval": "1s",
|
||||||
|
//},
|
||||||
|
"listen": myC.Settings["listen"],
|
||||||
|
"logging": myC.Settings["logging"],
|
||||||
|
"timers": myC.Settings["timers"],
|
||||||
|
}
|
||||||
|
|
||||||
|
cb, err := yaml.Marshal(mc)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Logf("reload new v2-only config")
|
||||||
|
err = myC.ReloadConfigString(string(cb))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
r.Log("yay, spin until their sees it")
|
||||||
|
waitStart := time.Now()
|
||||||
|
for {
|
||||||
|
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||||
|
c := theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
|
||||||
|
if c == nil {
|
||||||
|
r.Log("nil")
|
||||||
|
} else {
|
||||||
|
version := c.Cert.Version()
|
||||||
|
r.Logf("version %d", version)
|
||||||
|
if version == cert.Version2 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
since := time.Since(waitStart)
|
||||||
|
if since > time.Second*10 {
|
||||||
|
t.Fatal("Cert should be new by now")
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||||
|
|
||||||
|
myControl.Stop()
|
||||||
|
theirControl.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCertDowngrade(t *testing.T) {
	// The goal of this test is to ensure that a host running with a v2 certificate can be reloaded with a
	// v1-only configuration and that its peer eventually observes the v1 certificate.
	ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
	caB, err := ca.MarshalPEM()
	if err != nil {
		panic(err)
	}
	ca2, _, caKey2, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})

	ca2B, err := ca2.MarshalPEM()
	if err != nil {
		panic(err)
	}
	caStr := fmt.Sprintf("%s\n%s", caB, ca2B)

	myCert, _, myPrivKey, myCertPem := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.1/24")}, nil, []string{})
	myCert2, _ := cert_test.NewTestCertDifferentVersion(myCert, cert.Version2, ca2, caKey2)

	theirCert, _, theirPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.2/24")}, nil, []string{})
	theirCert2, _ := cert_test.NewTestCertDifferentVersion(theirCert, cert.Version2, ca2, caKey2)

	myControl, myVpnIpNet, myUdpAddr, myC := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{myCert2}, myPrivKey, m{})
	theirControl, theirVpnIpNet, theirUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{theirCert, theirCert2}, theirPrivKey, m{})

	// Share our underlay information
	myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
	theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)

	// Start the servers
	myControl.Start()
	theirControl.Start()

	r := router.NewR(t, myControl, theirControl)
	defer r.RenderFlow()

	r.Log("Assert the tunnel between me and them works")
	//assertTunnel(t, theirVpnIpNet[0].Addr(), myVpnIpNet[0].Addr(), theirControl, myControl, r)
	//r.Log("yay")
	assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
	r.Log("yay")
	//todo ???
	time.Sleep(1 * time.Second)
	r.FlushAll()

	mc := m{
		"pki": m{
			"ca":   caStr,
			"cert": string(myCertPem),
			"key":  string(myPrivKey),
		},
		"firewall": myC.Settings["firewall"],
		"listen":   myC.Settings["listen"],
		"logging":  myC.Settings["logging"],
		"timers":   myC.Settings["timers"],
	}

	cb, err := yaml.Marshal(mc)
	if err != nil {
		panic(err)
	}

	r.Logf("reload new v1-only config")
	err = myC.ReloadConfigString(string(cb))
	assert.NoError(t, err)
	r.Log("yay, spin until they see it")
	waitStart := time.Now()
	for {
		assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
		c := theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
		c2 := myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
		if c == nil || c2 == nil {
			r.Log("nil")
		} else {
			version := c.Cert.Version()
			theirVersion := c2.Cert.Version()
			r.Logf("version %d,%d", version, theirVersion)
			if version == cert.Version1 {
				break
			}
		}
		since := time.Since(waitStart)
		if since > time.Second*5 {
			r.Log("it is unusual that the cert is not new yet, but not a failure yet")
		}
		if since > time.Second*10 {
			r.Log("wtf")
			t.Fatal("Cert should be new by now")
		}
		time.Sleep(time.Second)
	}

	r.RenderHostmaps("Final hostmaps", myControl, theirControl)

	myControl.Stop()
	theirControl.Stop()
}

func TestCertMismatchCorrection(t *testing.T) {
	// The goal of this test is to ensure that two hosts which initially present different certificate
	// versions converge on a matching version without any config reload.
	ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
	ca2, _, caKey2, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})

	myCert, _, myPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.1/24")}, nil, []string{})
	myCert2, _ := cert_test.NewTestCertDifferentVersion(myCert, cert.Version2, ca2, caKey2)

	theirCert, _, theirPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.2/24")}, nil, []string{})
	theirCert2, _ := cert_test.NewTestCertDifferentVersion(theirCert, cert.Version2, ca2, caKey2)

	myControl, myVpnIpNet, myUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{myCert2}, myPrivKey, m{})
	theirControl, theirVpnIpNet, theirUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{theirCert, theirCert2}, theirPrivKey, m{})

	// Share our underlay information
	myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
	theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)

	// Start the servers
	myControl.Start()
	theirControl.Start()

	r := router.NewR(t, myControl, theirControl)
	defer r.RenderFlow()

	r.Log("Assert the tunnel between me and them works")
	//assertTunnel(t, theirVpnIpNet[0].Addr(), myVpnIpNet[0].Addr(), theirControl, myControl, r)
	//r.Log("yay")
	assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
	r.Log("yay")
	//todo ???
	time.Sleep(1 * time.Second)
	r.FlushAll()

	waitStart := time.Now()
	for {
		assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
		c := theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
		c2 := myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
		if c == nil || c2 == nil {
			r.Log("nil")
		} else {
			version := c.Cert.Version()
			theirVersion := c2.Cert.Version()
			r.Logf("version %d,%d", version, theirVersion)
			if version == theirVersion {
				break
			}
		}
		since := time.Since(waitStart)
		if since > time.Second*5 {
			r.Log("wtf")
		}
		if since > time.Second*10 {
			r.Log("wtf")
			t.Fatal("Cert should be new by now")
		}
		time.Sleep(time.Second)
	}

	r.RenderHostmaps("Final hostmaps", myControl, theirControl)

	myControl.Stop()
	theirControl.Stop()
}

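All three tests above repeat the same hand-rolled retry loop: assert the tunnel still works, look up the peer's host info, and bail out after roughly ten seconds. The snippet below is a minimal, self-contained sketch of a helper that could factor that loop out; the name waitFor, its signature, and the stand-in condition are illustrative and are not part of this change set.

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond once per second until it reports true or the timeout
// elapses. It mirrors the retry loops the tests above write by hand.
func waitFor(timeout time.Duration, cond func() bool) error {
	start := time.Now()
	for {
		if cond() {
			return nil
		}
		if time.Since(start) > timeout {
			return fmt.Errorf("condition not met within %s", timeout)
		}
		time.Sleep(time.Second)
	}
}

func main() {
	begin := time.Now()
	// Stand-in condition for "the peer has observed the expected certificate version".
	err := waitFor(10*time.Second, func() bool {
		return time.Since(begin) > 2*time.Second
	})
	fmt.Println(err) // <nil>
}
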
@@ -338,6 +338,18 @@ logging:
   # after receiving the response for lighthouse queries
   #trigger_buffer: 64
 
+
+# Tunnel manager settings
+#tunnels:
+  # drop_inactive controls whether inactive tunnels are maintained or dropped after the inactivity_timeout period has
+  # elapsed.
+  # In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
+  # This setting is reloadable
+  #drop_inactive: false
+
+  # inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
+  # inactive and eligible to be dropped.
+  # This setting is reloadable
+  #inactivity_timeout: 10m
+
 # Nebula security group configuration
 firewall:

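For reference, the commented-out tunnels settings above could be exercised from the e2e harness by marshaling them the same way the tests in this change build and reload their config maps. The snippet below is a minimal sketch under that assumption; the keys come from the example config, while the package main wrapper and plain map types are illustrative rather than the test helpers used in the suite.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Render the tunnels settings described above as YAML, mirroring how the
	// e2e tests marshal a config map before calling ReloadConfigString.
	cfg := map[string]any{
		"tunnels": map[string]any{
			"drop_inactive":      true,
			"inactivity_timeout": "10m",
		},
	}
	out, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
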
firewall.go (17 changes)

@@ -10,6 +10,7 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/gaissmai/bart"
@@ -18,7 +19,6 @@ import (
 	"github.com/slackhq/nebula/cert"
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/firewall"
-	"github.com/wadey/synctrace"
 )
 
 type FirewallInterface interface {
@@ -76,7 +76,7 @@ type firewallMetrics struct {
 }
 
 type FirewallConntrack struct {
-	synctrace.Mutex
+	sync.Mutex
 
 	Conns      map[firewall.Packet]*conn
 	TimerWheel *TimerWheel[firewall.Packet]
@@ -164,7 +164,6 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
 
 	return &Firewall{
 		Conntrack: &FirewallConntrack{
-			Mutex:      synctrace.NewMutex("firewall-conntrack"),
 			Conns:      make(map[firewall.Packet]*conn),
 			TimerWheel: NewTimerWheel[firewall.Packet](tmin, tmax),
 		},
@@ -424,7 +423,7 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
 
 // Drop returns an error if the packet should be dropped, explaining why. It
 // returns nil if the packet should not be dropped.
-func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache) error {
+func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.CAPool, localCache *firewall.ConntrackCache) error {
 	// Check if we spoke to this tuple, if we did then allow this packet
 	if f.inConns(fp, h, caPool, localCache) {
 		return nil
@@ -491,11 +490,9 @@ func (f *Firewall) EmitStats() {
 	metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
 }
 
-func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache) bool {
-	if localCache != nil {
-		if _, ok := localCache[fp]; ok {
-			return true
-		}
+func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool, localCache *firewall.ConntrackCache) bool {
+	if localCache != nil && localCache.Has(fp) {
+		return true
 	}
 	conntrack := f.Conntrack
 	conntrack.Lock()
@@ -560,7 +557,7 @@ func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool,
 	conntrack.Unlock()
 
 	if localCache != nil {
-		localCache[fp] = struct{}{}
+		localCache.Add(fp)
 	}
 
 	return true

@@ -1,6 +1,7 @@
 package firewall
 
 import (
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -9,13 +10,58 @@ import (
 
 // ConntrackCache is used as a local routine cache to know if a given flow
 // has been seen in the conntrack table.
-type ConntrackCache map[Packet]struct{}
+type ConntrackCache struct {
+	mu      sync.Mutex
+	entries map[Packet]struct{}
+}
+
+func newConntrackCache() *ConntrackCache {
+	return &ConntrackCache{entries: make(map[Packet]struct{})}
+}
+
+func (c *ConntrackCache) Has(p Packet) bool {
+	if c == nil {
+		return false
+	}
+	c.mu.Lock()
+	_, ok := c.entries[p]
+	c.mu.Unlock()
+	return ok
+}
+
+func (c *ConntrackCache) Add(p Packet) {
+	if c == nil {
+		return
+	}
+	c.mu.Lock()
+	c.entries[p] = struct{}{}
+	c.mu.Unlock()
+}
+
+func (c *ConntrackCache) Len() int {
+	if c == nil {
+		return 0
+	}
+	c.mu.Lock()
+	l := len(c.entries)
+	c.mu.Unlock()
+	return l
+}
+
+func (c *ConntrackCache) Reset(capHint int) {
+	if c == nil {
+		return
+	}
+	c.mu.Lock()
+	c.entries = make(map[Packet]struct{}, capHint)
+	c.mu.Unlock()
+}
 
 type ConntrackCacheTicker struct {
 	cacheV    uint64
 	cacheTick atomic.Uint64
 
-	cache ConntrackCache
+	cache *ConntrackCache
 }
 
 func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
@@ -23,9 +69,7 @@ func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
 		return nil
 	}
 
-	c := &ConntrackCacheTicker{
-		cache: ConntrackCache{},
-	}
+	c := &ConntrackCacheTicker{cache: newConntrackCache()}
 
 	go c.tick(d)
 
@@ -41,17 +85,17 @@ func (c *ConntrackCacheTicker) tick(d time.Duration) {
 
 // Get checks if the cache ticker has moved to the next version before returning
 // the map. If it has moved, we reset the map.
-func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
+func (c *ConntrackCacheTicker) Get(l *logrus.Logger) *ConntrackCache {
 	if c == nil {
 		return nil
 	}
 	if tick := c.cacheTick.Load(); tick != c.cacheV {
 		c.cacheV = tick
-		if ll := len(c.cache); ll > 0 {
+		if ll := c.cache.Len(); ll > 0 {
 			if l.Level == logrus.DebugLevel {
 				l.WithField("len", ll).Debug("resetting conntrack cache")
 			}
-			c.cache = make(ConntrackCache, ll)
+			c.cache.Reset(ll)
 		}
 	}
 
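The pattern the hunk above introduces is a nil-safe, mutex-guarded set: callers hold a *ConntrackCache that may be nil (caching disabled) and can still call Has and Add without nil checks at every site. The standalone sketch below illustrates that pattern; the type and key names are illustrative (a plain string key instead of firewall.Packet) and are not the nebula types themselves.

package main

import (
	"fmt"
	"sync"
)

// flowCache is a nil-safe, mutex-guarded set. A nil *flowCache behaves like a
// cache that never hits and silently drops writes, so call sites do not need
// to special-case the "caching disabled" configuration.
type flowCache struct {
	mu      sync.Mutex
	entries map[string]struct{}
}

func newFlowCache() *flowCache {
	return &flowCache{entries: make(map[string]struct{})}
}

func (c *flowCache) Has(k string) bool {
	if c == nil {
		return false
	}
	c.mu.Lock()
	_, ok := c.entries[k]
	c.mu.Unlock()
	return ok
}

func (c *flowCache) Add(k string) {
	if c == nil {
		return
	}
	c.mu.Lock()
	c.entries[k] = struct{}{}
	c.mu.Unlock()
}

func main() {
	var disabled *flowCache // nil: every lookup misses, every add is a no-op
	disabled.Add("10.0.0.1:80->10.0.0.2:443/udp")
	fmt.Println(disabled.Has("10.0.0.1:80->10.0.0.2:443/udp")) // false

	enabled := newFlowCache()
	enabled.Add("10.0.0.1:80->10.0.0.2:443/udp")
	fmt.Println(enabled.Has("10.0.0.1:80->10.0.0.2:443/udp")) // true
}
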
firewall_test.go (241 changes)

@@ -68,6 +68,9 @@ func TestFirewall_AddRule(t *testing.T) {
 	ti, err := netip.ParsePrefix("1.2.3.4/32")
 	require.NoError(t, err)
 
+	ti6, err := netip.ParsePrefix("fd12::34/128")
+	require.NoError(t, err)
+
 	require.NoError(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	// An empty rule is any
 	assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
@@ -92,12 +95,24 @@ func TestFirewall_AddRule(t *testing.T) {
 	_, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
 	assert.True(t, ok)
 
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
+	require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti6, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
+	_, ok = fw.OutRules.AnyProto[1].Any.CIDR.Get(ti6)
+	assert.True(t, ok)
+
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
 	require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
 	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
 	_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
 	assert.True(t, ok)
 
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
+	require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti6, "", ""))
+	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
+	_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti6)
+	assert.True(t, ok)
+
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
 	require.NoError(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
 	assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
@@ -117,6 +132,13 @@ func TestFirewall_AddRule(t *testing.T) {
 	require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
 	assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
 
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
+	anyIp6, err := netip.ParsePrefix("::/0")
+	require.NoError(t, err)
+
+	require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp6, netip.Prefix{}, "", ""))
+	assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
+
 	// Test error conditions
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
 	require.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
@@ -199,6 +221,82 @@ func TestFirewall_Drop(t *testing.T) {
 	require.NoError(t, fw.Drop(p, true, &h, cp, nil))
 }
 
+func TestFirewall_DropV6(t *testing.T) {
+	l := test.NewLogger()
+	ob := &bytes.Buffer{}
+	l.SetOutput(ob)
+
+	p := firewall.Packet{
+		LocalAddr:  netip.MustParseAddr("fd12::34"),
+		RemoteAddr: netip.MustParseAddr("fd12::34"),
+		LocalPort:  10,
+		RemotePort: 90,
+		Protocol:   firewall.ProtoUDP,
+		Fragment:   false,
+	}
+
+	c := dummyCert{
+		name:     "host1",
+		networks: []netip.Prefix{netip.MustParsePrefix("fd12::34/120")},
+		groups:   []string{"default-group"},
+		issuer:   "signer-shasum",
+	}
+	h := HostInfo{
+		ConnectionState: &ConnectionState{
+			peerCert: &cert.CachedCertificate{
+				Certificate:    &c,
+				InvertedGroups: map[string]struct{}{"default-group": {}},
+			},
+		},
+		vpnAddrs: []netip.Addr{netip.MustParseAddr("fd12::34")},
+	}
+	h.buildNetworks(c.networks, c.unsafeNetworks)
+
+	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	cp := cert.NewCAPool()
+
+	// Drop outbound
+	assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
+	// Allow inbound
+	resetConntrack(fw)
+	require.NoError(t, fw.Drop(p, true, &h, cp, nil))
+	// Allow outbound because conntrack
+	require.NoError(t, fw.Drop(p, false, &h, cp, nil))
+
+	// test remote mismatch
+	oldRemote := p.RemoteAddr
+	p.RemoteAddr = netip.MustParseAddr("fd12::56")
+	assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
+	p.RemoteAddr = oldRemote
+
+	// ensure signer doesn't get in the way of group checks
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
+	assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
+
+	// test caSha doesn't drop on match
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
+	require.NoError(t, fw.Drop(p, true, &h, cp, nil))
+
+	// ensure ca name doesn't get in the way of group checks
+	cp.CAs["signer-shasum"] = &cert.CachedCertificate{Certificate: &dummyCert{name: "ca-good"}}
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
+	assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
+
+	// test caName doesn't drop on match
+	cp.CAs["signer-shasum"] = &cert.CachedCertificate{Certificate: &dummyCert{name: "ca-good"}}
+	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
+	require.NoError(t, fw.Drop(p, true, &h, cp, nil))
+}
+
 func BenchmarkFirewallTable_match(b *testing.B) {
 	f := &Firewall{}
 	ft := FirewallTable{
@@ -208,6 +306,10 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 	pfix := netip.MustParsePrefix("172.1.1.1/32")
 	_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix, netip.Prefix{}, "", "")
 	_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix, "", "")
+
+	pfix6 := netip.MustParsePrefix("fd11::11/128")
+	_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix6, netip.Prefix{}, "", "")
+	_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix6, "", "")
 	cp := cert.NewCAPool()
 
 	b.Run("fail on proto", func(b *testing.B) {
@@ -239,6 +341,15 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: ip.Addr()}, true, c, cp))
 		}
 	})
+	b.Run("pass proto, port, fail on local CIDRv6", func(b *testing.B) {
+		c := &cert.CachedCertificate{
+			Certificate: &dummyCert{},
+		}
+		ip := netip.MustParsePrefix("fd99::99/128")
+		for n := 0; n < b.N; n++ {
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: ip.Addr()}, true, c, cp))
+		}
+	})
 
 	b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
 		c := &cert.CachedCertificate{
@@ -252,6 +363,18 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
 		}
 	})
+	b.Run("pass proto, port, any local CIDRv6, fail all group, name, and cidr", func(b *testing.B) {
+		c := &cert.CachedCertificate{
+			Certificate: &dummyCert{
+				name:     "nope",
+				networks: []netip.Prefix{netip.MustParsePrefix("fd99::99/128")},
+			},
+			InvertedGroups: map[string]struct{}{"nope": {}},
+		}
+		for n := 0; n < b.N; n++ {
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
+		}
+	})
 
 	b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
 		c := &cert.CachedCertificate{
@@ -265,6 +388,18 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix.Addr()}, true, c, cp))
 		}
 	})
+	b.Run("pass proto, port, specific local CIDRv6, fail all group, name, and cidr", func(b *testing.B) {
+		c := &cert.CachedCertificate{
+			Certificate: &dummyCert{
+				name:     "nope",
+				networks: []netip.Prefix{netip.MustParsePrefix("fd99::99/128")},
+			},
+			InvertedGroups: map[string]struct{}{"nope": {}},
+		}
+		for n := 0; n < b.N; n++ {
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix6.Addr()}, true, c, cp))
+		}
+	})
 
 	b.Run("pass on group on any local cidr", func(b *testing.B) {
 		c := &cert.CachedCertificate{
@@ -289,6 +424,17 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix.Addr()}, true, c, cp))
 		}
 	})
+	b.Run("pass on group on specific local cidr6", func(b *testing.B) {
+		c := &cert.CachedCertificate{
+			Certificate: &dummyCert{
+				name: "nope",
+			},
+			InvertedGroups: map[string]struct{}{"good-group": {}},
+		}
+		for n := 0; n < b.N; n++ {
+			assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix6.Addr()}, true, c, cp))
+		}
+	})
 
 	b.Run("pass on name", func(b *testing.B) {
 		c := &cert.CachedCertificate{
@@ -447,6 +593,42 @@ func TestFirewall_Drop3(t *testing.T) {
 	require.NoError(t, fw.Drop(p, true, &h1, cp, nil))
 }
 
+func TestFirewall_Drop3V6(t *testing.T) {
+	l := test.NewLogger()
+	ob := &bytes.Buffer{}
+	l.SetOutput(ob)
+
+	p := firewall.Packet{
+		LocalAddr:  netip.MustParseAddr("fd12::34"),
+		RemoteAddr: netip.MustParseAddr("fd12::34"),
+		LocalPort:  1,
+		RemotePort: 1,
+		Protocol:   firewall.ProtoUDP,
+		Fragment:   false,
+	}
+
+	network := netip.MustParsePrefix("fd12::34/120")
+	c := cert.CachedCertificate{
+		Certificate: &dummyCert{
+			name:     "host-owner",
+			networks: []netip.Prefix{network},
+		},
+	}
+	h := HostInfo{
+		ConnectionState: &ConnectionState{
+			peerCert: &c,
+		},
+		vpnAddrs: []netip.Addr{network.Addr()},
+	}
+	h.buildNetworks(c.Certificate.Networks(), c.Certificate.UnsafeNetworks())
+
+	// Test a remote address match
+	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
+	cp := cert.NewCAPool()
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.MustParsePrefix("fd12::34/120"), netip.Prefix{}, "", ""))
+	require.NoError(t, fw.Drop(p, true, &h, cp, nil))
+}
+
 func TestFirewall_DropConntrackReload(t *testing.T) {
 	l := test.NewLogger()
 	ob := &bytes.Buffer{}
@@ -510,6 +692,50 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 	assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
 }
 
+func TestFirewall_DropIPSpoofing(t *testing.T) {
+	l := test.NewLogger()
+	ob := &bytes.Buffer{}
+	l.SetOutput(ob)
+
+	c := cert.CachedCertificate{
+		Certificate: &dummyCert{
+			name:     "host-owner",
+			networks: []netip.Prefix{netip.MustParsePrefix("192.0.2.1/24")},
+		},
+	}
+
+	c1 := cert.CachedCertificate{
+		Certificate: &dummyCert{
+			name:           "host",
+			networks:       []netip.Prefix{netip.MustParsePrefix("192.0.2.2/24")},
+			unsafeNetworks: []netip.Prefix{netip.MustParsePrefix("198.51.100.0/24")},
+		},
+	}
+	h1 := HostInfo{
+		ConnectionState: &ConnectionState{
+			peerCert: &c1,
+		},
+		vpnAddrs: []netip.Addr{c1.Certificate.Networks()[0].Addr()},
+	}
+	h1.buildNetworks(c1.Certificate.Networks(), c1.Certificate.UnsafeNetworks())
+
+	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
+
+	require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	cp := cert.NewCAPool()
+
+	// Packet spoofed by `c1`. Note that the remote addr is not a valid one.
+	p := firewall.Packet{
+		LocalAddr:  netip.MustParseAddr("192.0.2.1"),
+		RemoteAddr: netip.MustParseAddr("192.0.2.3"),
+		LocalPort:  1,
+		RemotePort: 1,
+		Protocol:   firewall.ProtoUDP,
+		Fragment:   false,
+	}
+	assert.Equal(t, fw.Drop(p, true, &h1, cp, nil), ErrInvalidRemoteIP)
+}
+
 func BenchmarkLookup(b *testing.B) {
 	ml := func(m map[string]struct{}, a [][]string) {
 		for n := 0; n < b.N; n++ {
@@ -727,6 +953,21 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
 	require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
 	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
 
+	// Test adding rule with cidr ipv6
+	cidr6 := netip.MustParsePrefix("fd00::/8")
+	conf = config.NewC(l)
+	mf = &mockFirewall{}
+	conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "cidr": cidr6.String()}}}
+	require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr6, localIp: netip.Prefix{}}, mf.lastCall)
+
+	// Test adding rule with local_cidr ipv6
+	conf = config.NewC(l)
+	mf = &mockFirewall{}
+	conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "local_cidr": cidr6.String()}}}
+	require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr6}, mf.lastCall)
+
 	// Test adding rule with ca_sha
 	conf = config.NewC(l)
 	mf = &mockFirewall{}

go.mod (48 changes)

@@ -1,40 +1,37 @@
 module github.com/slackhq/nebula
 
-go 1.23.0
-
-toolchain go1.24.1
+go 1.25
 
 require (
-	dario.cat/mergo v1.0.1
+	dario.cat/mergo v1.0.2
 	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
 	github.com/armon/go-radix v1.0.0
 	github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
 	github.com/flynn/noise v1.1.0
-	github.com/gaissmai/bart v0.20.4
+	github.com/gaissmai/bart v0.25.0
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/gopacket v1.1.19
-	github.com/kardianos/service v1.2.2
-	github.com/miekg/dns v1.1.65
+	github.com/kardianos/service v1.2.4
+	github.com/miekg/dns v1.1.68
 	github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b
 	github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
-	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/client_golang v1.23.2
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
 	github.com/sirupsen/logrus v1.9.3
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
-	github.com/stretchr/testify v1.10.0
-	github.com/vishvananda/netlink v1.3.0
-	github.com/wadey/synctrace v0.0.0-20250612192159-94547ef50dfe
-	golang.org/x/crypto v0.37.0
+	github.com/stretchr/testify v1.11.1
+	github.com/vishvananda/netlink v1.3.1
+	golang.org/x/crypto v0.43.0
 	golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
-	golang.org/x/net v0.39.0
-	golang.org/x/sync v0.13.0
-	golang.org/x/sys v0.32.0
-	golang.org/x/term v0.31.0
+	golang.org/x/net v0.45.0
+	golang.org/x/sync v0.17.0
+	golang.org/x/sys v0.37.0
+	golang.org/x/term v0.36.0
 	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
 	golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
 	golang.zx2c4.com/wireguard/windows v0.5.3
-	google.golang.org/protobuf v1.36.6
+	google.golang.org/protobuf v1.36.8
 	gopkg.in/yaml.v3 v3.0.1
 	gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
 )
@@ -43,18 +40,15 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/google/btree v1.1.2 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/heimdalr/dag v1.4.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/timandy/routine v1.1.5 // indirect
-	github.com/vishvananda/netns v0.0.4 // indirect
-	golang.org/x/mod v0.23.0 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.66.1 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/vishvananda/netns v0.0.5 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	golang.org/x/mod v0.24.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.30.0 // indirect
+	golang.org/x/tools v0.33.0 // indirect
 )

go.sum (93 changes)

@@ -1,6 +1,6 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
-dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -22,12 +22,10 @@ github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
-github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
 github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
-github.com/gaissmai/bart v0.20.4 h1:Ik47r1fy3jRVU+1eYzKSW3ho2UgBVTVnUS8O993584U=
-github.com/gaissmai/bart v0.20.4/go.mod h1:cEed+ge8dalcbpi8wtS9x9m2hn/fNJH5suhdGQOHnYk=
+github.com/gaissmai/bart v0.25.0 h1:eqiokVPqM3F94vJ0bTHXHtH91S8zkKL+bKh+BsGOsJM=
+github.com/gaissmai/bart v0.25.0/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -35,8 +33,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
-github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -62,18 +58,14 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/heimdalr/dag v1.4.0 h1:zG3JA4RDVLc55k3AXAgfwa+EgBNZ0TkfOO3C29Ucpmg=
-github.com/heimdalr/dag v1.4.0/go.mod h1:OCh6ghKmU0hPjtwMqWBoNxPmtRioKd1xSu7Zs4sbIqM=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
-github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
+github.com/kardianos/service v1.2.4 h1:XNlGtZOYNx2u91urOdg/Kfmc+gfmuIo1Dd3rEi2OgBk=
+github.com/kardianos/service v1.2.4/go.mod h1:E4V9ufUuY82F7Ztlu1eN9VXWIQxg8NoLQlmFe0MtrXc=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -91,8 +83,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
-github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
+github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
+github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
 github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b h1:J/AzCvg5z0Hn1rqZUJjpbzALUmkKX0Zwbc/i4fw7Sfk=
 github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -114,24 +106,24 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -151,33 +143,33 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/timandy/routine v1.1.5 h1:LSpm7Iijwb9imIPlucl4krpr2EeCeAUvifiQ9Uf5X+M=
-github.com/timandy/routine v1.1.5/go.mod h1:kXslgIosdY8LW0byTyPnenDgn4/azt2euufAq9rK51w=
-github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
-github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
-github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
-github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
-github.com/wadey/synctrace v0.0.0-20250612192159-94547ef50dfe h1:dc8Q42VsX+ABr0drJw27f3smvGfcz7eB8rJx+IkVMAo=
-github.com/wadey/synctrace v0.0.0-20250612192159-94547ef50dfe/go.mod h1:F2VCml4UxGPgAAqqm9T0ZfnVRWITrQS1EMZM+KCAm/Q=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
+github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
+github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
+github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
-golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
 golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
 golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
|
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||||
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@@ -188,8 +180,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -197,8 +189,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@@ -209,18 +201,17 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
||||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
|
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||||
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
|
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
@@ -231,8 +222,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
|
|||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
|
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||||
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
|
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@@ -251,8 +242,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
|||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
|||||||
handshake_ix.go
@@ -9,7 +9,6 @@ import (
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/wadey/synctrace"
)

// NOISE IX Handshakes
@@ -24,13 +23,17 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
return false
}

// If we're connecting to a v6 address we must use a v2 cert
cs := f.pki.getCertState()
v := cs.initiatingVersion
for _, a := range hh.hostinfo.vpnAddrs {
if hh.initiatingVersionOverride != cert.VersionPre1 {
if a.Is6() {
v = hh.initiatingVersionOverride
v = cert.Version2
} else if v < cert.Version2 {
break
// If we're connecting to a v6 address we should encourage use of a V2 cert
for _, a := range hh.hostinfo.vpnAddrs {
if a.Is6() {
v = cert.Version2
break
}
}
}

@@ -49,6 +52,7 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
WithField("certVersion", v).
Error("Unable to handshake with host because no certificate handshake bytes is available")
return false
}

ci, err := NewConnectionState(f.l, cs, crt, true, noise.HandshakeIX)
@@ -104,6 +108,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
WithField("certVersion", cs.initiatingVersion).
Error("Unable to handshake with host because no certificate is available")
return
}

ci, err := NewConnectionState(f.l, cs, crt, false, noise.HandshakeIX)
@@ -144,8 +149,8 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet

remoteCert, err := f.pki.GetCAPool().VerifyCertificate(time.Now(), rc)
if err != nil {
fp, err := rc.Fingerprint()
fp, fperr := rc.Fingerprint()
if err != nil {
if fperr != nil {
fp = "<error generating certificate fingerprint>"
}

@@ -164,16 +169,19 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet

if remoteCert.Certificate.Version() != ci.myCert.Version() {
// We started off using the wrong certificate version, lets see if we can match the version that was sent to us
rc := cs.getCertificate(remoteCert.Certificate.Version())
myCertOtherVersion := cs.getCertificate(remoteCert.Certificate.Version())
if rc == nil {
if myCertOtherVersion == nil {
f.l.WithError(err).WithField("udpAddr", addr).
if f.l.Level >= logrus.DebugLevel {
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
f.l.WithError(err).WithFields(m{
Info("Unable to handshake with host due to missing certificate version")
"udpAddr": addr,
return
"handshake": m{"stage": 1, "style": "ix_psk0"},
"cert": remoteCert,
}).Debug("Might be unable to handshake with host due to missing certificate version")
}
} else {
// Record the certificate we are actually using
ci.myCert = myCertOtherVersion
}

// Record the certificate we are actually using
ci.myCert = rc
}

if len(remoteCert.Certificate.Networks()) == 0 {
@@ -250,8 +258,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
HandshakePacket: make(map[uint8][]byte, 0),
lastHandshakeTime: hs.Details.Time,
relayState: RelayState{
RWMutex: synctrace.NewRWMutex("relay-state"),
relays: nil,
relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
@@ -459,9 +466,9 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
Info("Handshake message sent")
}

f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
f.connectionManager.AddTrafficWatch(hostinfo)

hostinfo.remotes.ResetBlockedRemotes()
hostinfo.remotes.RefreshFromHandshake(vpnAddrs)

return
}
@@ -654,7 +661,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha

// Complete our handshake and update metrics, this will replace any existing tunnels for the vpnAddrs here
f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
f.connectionManager.AddTrafficWatch(hostinfo)

if f.l.Level >= logrus.DebugLevel {
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
@@ -669,7 +676,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
}

hostinfo.remotes.ResetBlockedRemotes()
hostinfo.remotes.RefreshFromHandshake(vpnAddrs)
f.metricHandshakes.Update(duration)

return false
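The stage 0 change above reorders how the initiating certificate version is picked: a per-handshake override wins outright, and only otherwise does a v6 peer address encourage an upgrade to a V2 certificate. A minimal sketch of that selection order, assuming the package context of the diff; the helper name chooseInitiatingVersion is illustrative and not part of the change:

package nebula

import (
	"net/netip"

	"github.com/slackhq/nebula/cert"
)

// chooseInitiatingVersion mirrors the selection order in ixHandshakeStage0:
// an explicit per-handshake override wins, otherwise an IPv6 peer address
// bumps the configured default up to Version2.
func chooseInitiatingVersion(defaultV, override cert.Version, peerAddrs []netip.Addr) cert.Version {
	if override != cert.VersionPre1 {
		// An explicit override for this handshake always wins.
		return override
	}
	if defaultV < cert.Version2 {
		for _, a := range peerAddrs {
			if a.Is6() {
				// A v6 vpn address needs the newer certificate format.
				return cert.Version2
			}
		}
	}
	return defaultV
}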
handshake_manager.go
@@ -8,6 +8,7 @@ import (
"errors"
"net/netip"
"slices"
"sync"
"time"

"github.com/rcrowley/go-metrics"
@@ -15,7 +16,6 @@ import (
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/udp"
"github.com/wadey/synctrace"
)

const (
@@ -45,7 +45,7 @@ type HandshakeConfig struct {

type HandshakeManager struct {
// Mutex for interacting with the vpnIps and indexes maps
synctrace.RWMutex
sync.RWMutex

vpnIps map[netip.Addr]*HandshakeHostInfo
indexes map[uint32]*HandshakeHostInfo
@@ -66,13 +66,14 @@ type HandshakeManager struct {
}

type HandshakeHostInfo struct {
synctrace.Mutex
sync.Mutex

startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
counter int64 // How many attempts have we made so far
initiatingVersionOverride cert.Version // Should we use a non-default cert version for this handshake?
lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
counter int64 // How many attempts have we made so far
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes
lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes

hostinfo *HostInfo
}
@@ -104,7 +105,6 @@ func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType,

func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{
RWMutex: synctrace.NewRWMutex("handshake-manager"),
vpnIps: map[netip.Addr]*HandshakeHostInfo{},
indexes: map[uint32]*HandshakeHostInfo{},
mainHostMap: mainHostMap,
@@ -112,7 +112,7 @@ func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *Lig
outside: outside,
config: config,
trigger: make(chan netip.Addr, config.triggerBuffer),
OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr]("outbound-handshake-timer", config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
messageMetrics: config.messageMetrics,
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
@@ -451,15 +451,13 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
vpnAddrs: []netip.Addr{vpnAddr},
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
RWMutex: synctrace.NewRWMutex("relay-state"),
relays: nil,
relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}

hh := &HandshakeHostInfo{
Mutex: synctrace.NewMutex("handshake-hostinfo"),
hostinfo: hostinfo,
startTime: time.Now(),
}
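The handshake_manager.go hunks above swap the synctrace wrappers back to the standard library types. One consequence worth noting: the zero value of sync.Mutex and sync.RWMutex is ready to use, which is why the explicit RWMutex and Mutex constructor fields drop out of the struct literals. A small self-contained illustration; the table type here is invented for the example:

package main

import "sync"

// With the standard library types, embedding sync.RWMutex needs no
// constructor call at all; the zero value is a valid, unlocked mutex.
type table struct {
	sync.RWMutex
	items map[string]int
}

func main() {
	t := table{items: map[string]int{}}
	t.Lock()
	t.items["a"] = 1
	t.Unlock()
}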
62
hostmap.go
@@ -4,6 +4,8 @@ import (
"errors"
"net"
"net/netip"
"slices"
"sync"
"sync/atomic"
"time"

@@ -13,15 +15,12 @@ import (
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/header"
"github.com/wadey/synctrace"
)

// const ProbeLen = 100
const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
const MaxRemotes = 10
const maxRecvError = 4

// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
// 5 allows for an initial handshake and each host pair re-handshaking twice
@@ -53,22 +52,22 @@ type Relay struct {
}

type HostMap struct {
synctrace.RWMutex //Because we concurrently read and write to our maps
sync.RWMutex //Because we concurrently read and write to our maps
Indexes map[uint32]*HostInfo
Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
RemoteIndexes map[uint32]*HostInfo
Hosts map[netip.Addr]*HostInfo
preferredRanges atomic.Pointer[[]netip.Prefix]
l *logrus.Logger
}

// For synchronization, treat the pointed-to Relay struct as immutable. To edit the Relay
// struct, make a copy of an existing value, edit the fileds in the copy, and
// then store a pointer to the new copy in both realyForBy* maps.
type RelayState struct {
synctrace.RWMutex
sync.RWMutex

relays map[netip.Addr]struct{} // Set of vpnAddr's of Hosts to use as relays to access this peer
relays []netip.Addr // Ordered set of VpnAddrs of Hosts to use as relays to access this peer
// For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data,
// modify what needs to be updated, and store the new modified copy in the relayForByIp and relayForByIdx maps (with
// the RelayState Lock held)
@@ -79,7 +78,12 @@ type RelayState struct {
func (rs *RelayState) DeleteRelay(ip netip.Addr) {
rs.Lock()
defer rs.Unlock()
delete(rs.relays, ip)
for idx, val := range rs.relays {
if val == ip {
rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
return
}
}
}

func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
@@ -124,16 +128,16 @@ func (rs *RelayState) GetRelayForByAddr(addr netip.Addr) (*Relay, bool) {
func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
rs.Lock()
defer rs.Unlock()
rs.relays[ip] = struct{}{}
if !slices.Contains(rs.relays, ip) {
rs.relays = append(rs.relays, ip)
}
}

func (rs *RelayState) CopyRelayIps() []netip.Addr {
ret := make([]netip.Addr, len(rs.relays))
rs.RLock()
defer rs.RUnlock()
ret := make([]netip.Addr, 0, len(rs.relays))
copy(ret, rs.relays)
for ip := range rs.relays {
ret = append(ret, ip)
}
return ret
}

@@ -219,8 +223,7 @@ type HostInfo struct {
// vpnAddrs is a list of vpn addresses assigned to this host that are within our own vpn networks
// The host may have other vpn addresses that are outside our
// vpn networks but were removed because they are not usable
vpnAddrs []netip.Addr
recvError atomic.Uint32

// networks are both all vpn and unsafe networks assigned to this host
networks *bart.Lite
@@ -250,6 +253,14 @@ type HostInfo struct {
// Used to track other hostinfos for this vpn ip since only 1 can be primary
// Synchronised via hostmap lock and not the hostinfo lock.
next, prev *HostInfo

//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
in, out, pendingDeletion atomic.Bool

// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
// This value will be behind against actual tunnel utilization in the hot path.
// This should only be used by the ConnectionManagers ticker routine.
lastUsed time.Time
}

type ViaSender struct {
@@ -288,7 +299,6 @@ func NewHostMapFromConfig(l *logrus.Logger, c *config.C) *HostMap {

func newHostMap(l *logrus.Logger) *HostMap {
return &HostMap{
RWMutex: synctrace.NewRWMutex("hostmap"),
Indexes: map[uint32]*HostInfo{},
Relays: map[uint32]*HostInfo{},
RemoteIndexes: map[uint32]*HostInfo{},
@@ -720,13 +730,6 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) b
return false
}

func (i *HostInfo) RecvErrorExceeded() bool {
if i.recvError.Add(1) >= maxRecvError {
return true
}
return true
}

func (i *HostInfo) buildNetworks(networks, unsafeNetworks []netip.Prefix) {
if len(networks) == 1 && len(unsafeNetworks) == 0 {
// Simple case, no CIDRTree needed
@@ -735,7 +738,8 @@ func (i *HostInfo) buildNetworks(networks, unsafeNetworks []netip.Prefix) {

i.networks = new(bart.Lite)
for _, network := range networks {
i.networks.Insert(network)
nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen())
i.networks.Insert(nprefix)
}

for _, network := range unsafeNetworks {
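The RelayState comments above describe a copy-on-write discipline for *Relay values: never mutate a stored *Relay in place, copy it, edit the copy, and re-store the copy in both relayForBy* maps while holding the write lock. A sketch of an update that follows that discipline, assuming the surrounding package; the method name updateRelayState and the Relay fields State, PeerAddr and LocalIndex are assumptions used only for illustration:

// updateRelayState is a hypothetical example of the copy-on-write pattern the
// comments call for; it is not part of the diff.
func (rs *RelayState) updateRelayState(vpnIp netip.Addr, state int) {
	rs.Lock()
	defer rs.Unlock()
	r, ok := rs.relayForByAddr[vpnIp]
	if !ok {
		return
	}
	newRelay := *r         // copy the existing value
	newRelay.State = state // edit only the copy
	// store the new copy in both lookup maps so readers never see a half-updated Relay
	rs.relayForByAddr[newRelay.PeerAddr] = &newRelay
	rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
}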
hostmap_test.go
@@ -7,6 +7,7 @@ import (
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestHostMap_MakePrimary(t *testing.T) {
@@ -215,3 +216,31 @@ func TestHostMap_reload(t *testing.T) {
c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
assert.Equal(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
}

func TestHostMap_RelayState(t *testing.T) {
h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
a1 := netip.MustParseAddr("::1")
a2 := netip.MustParseAddr("2001::1")

h1.relayState.InsertRelayTo(a1)
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
h1.relayState.InsertRelayTo(a2)
assert.Equal(t, []netip.Addr{a1, a2}, h1.relayState.relays)
// Ensure that the first relay added is the first one returned in the copy
currentRelays := h1.relayState.CopyRelayIps()
require.Len(t, currentRelays, 2)
assert.Equal(t, a1, currentRelays[0])

// Deleting the last one in the list works ok
h1.relayState.DeleteRelay(a2)
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)

// Deleting an element not in the list works ok
h1.relayState.DeleteRelay(a2)
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)

// Deleting the only element in the list works ok
h1.relayState.DeleteRelay(a1)
assert.Equal(t, []netip.Addr{}, h1.relayState.relays)

}
112
inside.go
@@ -2,16 +2,18 @@ package nebula

import (
"net/netip"
"unsafe"

"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/noiseutil"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/routing"
)

func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache *firewall.ConntrackCache) {
err := newPacket(packet, false, fwPacket)
if err != nil {
if f.l.Level >= logrus.DebugLevel {
@@ -288,7 +290,7 @@ func (f *Interface) SendVia(via *HostInfo,
c := via.ConnectionState.messageCounter.Add(1)

out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
f.connectionManager.Out(via.localIndexId)
f.connectionManager.Out(via)

// Authenticate the header and payload, but do not encrypt for this message type.
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
@@ -335,9 +337,21 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
if ci.eKey == nil {
return
}
useRelay := !remote.IsValid() && !hostinfo.remote.IsValid()
target := remote
if !target.IsValid() {
target = hostinfo.remote
}
useRelay := !target.IsValid()
fullOut := out

var pkt *overlay.Packet
if !useRelay && f.batches.Enabled() {
pkt = f.batches.newPacket()
if pkt != nil {
out = pkt.Payload()[:0]
}
}

if useRelay {
if len(out) < header.Len {
// out always has a capacity of mtu, but not always a length greater than the header.Len.
@@ -356,7 +370,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType

//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
f.connectionManager.Out(hostinfo.localIndexId)
f.connectionManager.Out(hostinfo)

// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
// all our addrs and enable a faster roaming.
@@ -371,41 +385,85 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
}

var err error
if len(p) > 0 && slicesOverlap(out, p) {
tmp := make([]byte, len(p))
copy(tmp, p)
p = tmp
}
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
if noiseutil.EncryptLockNeeded {
ci.writeLock.Unlock()
}
if err != nil {
if pkt != nil {
pkt.Release()
}
hostinfo.logger(f.l).WithError(err).
WithField("udpAddr", remote).WithField("counter", c).
WithField("udpAddr", target).WithField("counter", c).
WithField("attemptedCounter", c).
Error("Failed to encrypt outgoing packet")
return
}

if remote.IsValid() {
if target.IsValid() {
err = f.writers[q].WriteTo(out, remote)
if pkt != nil {
if err != nil {
pkt.Len = len(out)
hostinfo.logger(f.l).WithError(err).
if f.l.Level >= logrus.DebugLevel {
WithField("udpAddr", remote).Error("Failed to write outgoing packet")
f.l.WithFields(logrus.Fields{
}
"queue": q,
} else if hostinfo.remote.IsValid() {
"dest": target,
err = f.writers[q].WriteTo(out, hostinfo.remote)
"payload_len": pkt.Len,
if err != nil {
"use_batches": true,
hostinfo.logger(f.l).WithError(err).
"remote_index": hostinfo.remoteIndexId,
WithField("udpAddr", remote).Error("Failed to write outgoing packet")
}).Debug("enqueueing packet to UDP batch queue")
}
} else {
// Try to send via a relay
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
relayHostInfo, relay, err := f.hostMap.QueryVpnAddrsRelayFor(hostinfo.vpnAddrs, relayIP)
if err != nil {
hostinfo.relayState.DeleteRelay(relayIP)
hostinfo.logger(f.l).WithField("relay", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
continue
}
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
if f.tryQueuePacket(q, pkt, target) {
break
return
}
if f.l.Level >= logrus.DebugLevel {
f.l.WithFields(logrus.Fields{
"queue": q,
"dest": target,
}).Debug("failed to enqueue packet; falling back to immediate send")
}
f.writeImmediatePacket(q, pkt, target, hostinfo)
return
}
if f.tryQueueDatagram(q, out, target) {
return
}
f.writeImmediate(q, out, target, hostinfo)
return
}

// fall back to relay path
if pkt != nil {
pkt.Release()
}

// Try to send via a relay
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
relayHostInfo, relay, err := f.hostMap.QueryVpnAddrsRelayFor(hostinfo.vpnAddrs, relayIP)
if err != nil {
hostinfo.relayState.DeleteRelay(relayIP)
hostinfo.logger(f.l).WithField("relay", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
continue
}
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
break
}
}

// slicesOverlap reports whether the two byte slices share any portion of memory.
// cipher.AEAD.Seal requires plaintext and dst to live in disjoint regions.
func slicesOverlap(a, b []byte) bool {
if len(a) == 0 || len(b) == 0 {
return false
}

aStart := uintptr(unsafe.Pointer(&a[0]))
aEnd := aStart + uintptr(len(a))
bStart := uintptr(unsafe.Pointer(&b[0]))
bEnd := bStart + uintptr(len(b))
return aStart < bEnd && bStart < aEnd
}
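sendNoMetrics above copies p aside whenever it aliases the output buffer, because slicesOverlap treats any shared backing memory as unsafe for in-place AEAD sealing. A small self-contained program, written only to show what the check reports for aliased versus disjoint slices:

package main

import (
	"fmt"
	"unsafe"
)

// slicesOverlap mirrors the helper added in inside.go: two slices overlap
// when their backing arrays share at least one byte.
func slicesOverlap(a, b []byte) bool {
	if len(a) == 0 || len(b) == 0 {
		return false
	}
	aStart := uintptr(unsafe.Pointer(&a[0]))
	aEnd := aStart + uintptr(len(a))
	bStart := uintptr(unsafe.Pointer(&b[0]))
	bEnd := bStart + uintptr(len(b))
	return aStart < bEnd && bStart < aEnd
}

func main() {
	buf := make([]byte, 64)
	fmt.Println(slicesOverlap(buf[:16], buf[8:32]))         // true: same backing array
	fmt.Println(slicesOverlap(buf[:16], make([]byte, 16)))  // false: disjoint allocations
}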
737
interface.go
@@ -8,6 +8,7 @@ import (
"net/netip"
"os"
"runtime"
"strings"
"sync/atomic"
"time"

@@ -21,32 +22,43 @@ import (
"github.com/slackhq/nebula/udp"
)

const mtu = 9001
const (
mtu = 9001
defaultGSOFlushInterval = 150 * time.Microsecond
defaultBatchQueueDepthFactor = 4
defaultGSOMaxSegments = 8
maxKernelGSOSegments = 64
)

type InterfaceConfig struct {
HostMap *HostMap
Outside udp.Conn
Inside overlay.Device
pki *PKI
Firewall *Firewall
Cipher string
ServeDns bool
Firewall *Firewall
HandshakeManager *HandshakeManager
ServeDns bool
lightHouse *LightHouse
HandshakeManager *HandshakeManager
checkInterval time.Duration
lightHouse *LightHouse
pendingDeletionInterval time.Duration
connectionManager *connectionManager
DropLocalBroadcast bool
DropMulticast bool
routines int
EnableGSO bool
MessageMetrics *MessageMetrics
EnableGRO bool
version string
GSOMaxSegments int
relayManager *relayManager
routines int
punchy *Punchy
MessageMetrics *MessageMetrics
version string
relayManager *relayManager
punchy *Punchy

tryPromoteEvery uint32
reQueryEvery uint32
reQueryWait time.Duration

ConntrackCacheTimeout time.Duration
BatchFlushInterval time.Duration
BatchQueueDepth int
l *logrus.Logger
}

@@ -84,9 +96,20 @@ type Interface struct {
version string

conntrackCacheTimeout time.Duration
batchQueueDepth int
enableGSO bool
enableGRO bool
gsoMaxSegments int
batchUDPQueueGauge metrics.Gauge
batchUDPFlushCounter metrics.Counter
batchTunQueueGauge metrics.Gauge
batchTunFlushCounter metrics.Counter
batchFlushInterval atomic.Int64
sendSem chan struct{}

writers []udp.Conn
readers []io.ReadWriteCloser
batches batchPipelines

metricHandshakes metrics.Histogram
messageMetrics *MessageMetrics
@@ -157,6 +180,25 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
if c.Firewall == nil {
return nil, errors.New("no firewall rules")
}
if c.connectionManager == nil {
return nil, errors.New("no connection manager")
}

if c.GSOMaxSegments <= 0 {
c.GSOMaxSegments = defaultGSOMaxSegments
}
if c.GSOMaxSegments > maxKernelGSOSegments {
c.GSOMaxSegments = maxKernelGSOSegments
}
if c.BatchQueueDepth <= 0 {
c.BatchQueueDepth = c.routines * defaultBatchQueueDepthFactor
}
if c.BatchFlushInterval < 0 {
c.BatchFlushInterval = 0
}
if c.BatchFlushInterval == 0 && c.EnableGSO {
c.BatchFlushInterval = defaultGSOFlushInterval
}

cs := c.pki.getCertState()
ifce := &Interface{
@@ -181,8 +223,12 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
myVpnAddrsTable: cs.myVpnAddrsTable,
myBroadcastAddrsTable: cs.myVpnBroadcastAddrsTable,
relayManager: c.relayManager,
connectionManager: c.connectionManager,
conntrackCacheTimeout: c.ConntrackCacheTimeout,
batchQueueDepth: c.BatchQueueDepth,
enableGSO: c.EnableGSO,
enableGRO: c.EnableGRO,
gsoMaxSegments: c.GSOMaxSegments,

metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
messageMetrics: c.MessageMetrics,
@@ -195,10 +241,27 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
}

ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
ifce.batchUDPQueueGauge = metrics.GetOrRegisterGauge("batch.udp.queue_depth", nil)
ifce.batchUDPFlushCounter = metrics.GetOrRegisterCounter("batch.udp.flushes", nil)
ifce.batchTunQueueGauge = metrics.GetOrRegisterGauge("batch.tun.queue_depth", nil)
ifce.batchTunFlushCounter = metrics.GetOrRegisterCounter("batch.tun.flushes", nil)
ifce.batchFlushInterval.Store(int64(c.BatchFlushInterval))
ifce.sendSem = make(chan struct{}, c.routines)
ifce.batches.init(c.Inside, c.routines, c.BatchQueueDepth, c.GSOMaxSegments)
ifce.reQueryEvery.Store(c.reQueryEvery)
ifce.reQueryWait.Store(int64(c.reQueryWait))
if c.l.Level >= logrus.DebugLevel {
c.l.WithFields(logrus.Fields{
"enableGSO": c.EnableGSO,
"enableGRO": c.EnableGRO,
"gsoMaxSegments": c.GSOMaxSegments,
"batchQueueDepth": c.BatchQueueDepth,
"batchFlush": c.BatchFlushInterval,
"batching": ifce.batches.Enabled(),
}).Debug("initialized batch pipelines")
}

ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
ifce.connectionManager.intf = ifce

return ifce, nil
}
@@ -245,6 +308,18 @@ func (f *Interface) run() {
go f.listenOut(i)
}

if f.l.Level >= logrus.DebugLevel {
f.l.WithField("batching", f.batches.Enabled()).Debug("starting interface run loops")
}

if f.batches.Enabled() {
for i := 0; i < f.routines; i++ {
go f.runInsideBatchWorker(i)
go f.runTunWriteQueue(i)
go f.runSendQueue(i)
}
}

// Launch n queues to read packets from tun dev
for i := 0; i < f.routines; i++ {
go f.listenIn(f.readers[i], i)
@@ -276,6 +351,17 @@ func (f *Interface) listenOut(i int) {
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
runtime.LockOSThread()

if f.batches.Enabled() {
if br, ok := reader.(overlay.BatchReader); ok {
f.listenInBatchLocked(reader, br, i)
return
}
}

f.listenInLegacyLocked(reader, i)
}

func (f *Interface) listenInLegacyLocked(reader io.ReadWriteCloser, i int) {
packet := make([]byte, mtu)
out := make([]byte, mtu)
fwPacket := &firewall.Packet{}
@@ -299,6 +385,581 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
}
}

func (f *Interface) listenInBatchLocked(raw io.ReadWriteCloser, reader overlay.BatchReader, i int) {
pool := f.batches.Pool()
if pool == nil {
f.l.Warn("batch pipeline enabled without an allocated pool; falling back to single-packet reads")
f.listenInLegacyLocked(raw, i)
return
}

for {
packets, err := reader.ReadIntoBatch(pool)
if err != nil {
if errors.Is(err, os.ErrClosed) && f.closed.Load() {
return
}

if isVirtioHeadroomError(err) {
f.l.WithError(err).Warn("Batch reader fell back due to tun headroom issue")
f.listenInLegacyLocked(raw, i)
return
}

f.l.WithError(err).Error("Error while reading outbound packet batch")
os.Exit(2)
}

if len(packets) == 0 {
continue
}

for _, pkt := range packets {
if pkt == nil {
continue
}
if !f.batches.enqueueRx(i, pkt) {
pkt.Release()
}
}
}
}

func (f *Interface) runInsideBatchWorker(i int) {
queue := f.batches.rxQueue(i)
if queue == nil {
return
}

out := make([]byte, mtu)
fwPacket := &firewall.Packet{}
nb := make([]byte, 12, 12)
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)

for pkt := range queue {
if pkt == nil {
continue
}
f.consumeInsidePacket(pkt.Payload(), fwPacket, nb, out, i, conntrackCache.Get(f.l))
pkt.Release()
}
}

func (f *Interface) runSendQueue(i int) {
queue := f.batches.txQueue(i)
if queue == nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("queue", i).Debug("tx queue not initialized; batching disabled for writer")
}
return
}
writer := f.writerForIndex(i)
if writer == nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("queue", i).Debug("no UDP writer for batch queue")
}
return
}
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("queue", i).Debug("send queue worker started")
}
defer func() {
if f.l.Level >= logrus.WarnLevel {
f.l.WithField("queue", i).Warn("send queue worker exited")
}
}()

batchCap := f.batches.batchSizeHint()
if batchCap <= 0 {
batchCap = 1
}
gsoLimit := f.effectiveGSOMaxSegments()
if gsoLimit > batchCap {
batchCap = gsoLimit
}
pending := make([]queuedDatagram, 0, batchCap)
var (
flushTimer *time.Timer
flushC <-chan time.Time
)
dispatch := func(reason string, timerFired bool) {
if len(pending) == 0 {
return
}
batch := pending
f.flushAndReleaseBatch(i, writer, batch, reason)
for idx := range batch {
batch[idx] = queuedDatagram{}
}
pending = pending[:0]
if flushTimer != nil {
if !timerFired {
if !flushTimer.Stop() {
select {
case <-flushTimer.C:
default:
}
}
}
flushTimer = nil
flushC = nil
}
}
armTimer := func() {
delay := f.currentBatchFlushInterval()
if delay <= 0 {
dispatch("nogso", false)
return
}
if flushTimer == nil {
flushTimer = time.NewTimer(delay)
flushC = flushTimer.C
}
}
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case d := <-queue:
|
||||||
|
if d.packet == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
|
f.l.WithFields(logrus.Fields{
|
||||||
|
"queue": i,
|
||||||
|
"payload_len": d.packet.Len,
|
||||||
|
"dest": d.addr,
|
||||||
|
}).Debug("send queue received packet")
|
||||||
|
}
|
||||||
|
pending = append(pending, d)
|
||||||
|
if gsoLimit > 0 && len(pending) >= gsoLimit {
|
||||||
|
dispatch("gso", false)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(pending) >= cap(pending) {
|
||||||
|
dispatch("cap", false)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
armTimer()
|
||||||
|
f.observeUDPQueueLen(i)
|
||||||
|
case <-flushC:
|
||||||
|
dispatch("timer", true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
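The dispatch closure above (and the flush closure in runTunWriteQueue below) disarms its flush timer with Go's standard stop-and-drain idiom for time.Timer. A minimal, self-contained sketch of just that idiom, using a throwaway timer rather than anything from this patch:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.NewTimer(50 * time.Millisecond)

    // Work arrived and was flushed early, so the pending timer is no longer wanted.
    // Stop reports false when the timer already fired; the tick may still be sitting
    // in t.C, so drain it non-blockingly to avoid a spurious flush on the next select.
    if !t.Stop() {
        select {
        case <-t.C:
        default:
        }
    }
    fmt.Println("timer disarmed without leaving a stale tick behind")
}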

func (f *Interface) runTunWriteQueue(i int) {
queue := f.batches.tunQueue(i)
if queue == nil {
return
}
writer := f.batches.inside
if writer == nil {
return
}
requiredHeadroom := writer.BatchHeadroom()

batchCap := f.batches.batchSizeHint()
if batchCap <= 0 {
batchCap = 1
}
pending := make([]*overlay.Packet, 0, batchCap)
var (
flushTimer *time.Timer
flushC <-chan time.Time
)
flush := func(reason string, timerFired bool) {
if len(pending) == 0 {
return
}
valid := pending[:0]
for idx := range pending {
if !f.ensurePacketHeadroom(&pending[idx], requiredHeadroom, i, reason) {
pending[idx] = nil
continue
}
if pending[idx] != nil {
valid = append(valid, pending[idx])
}
}
if len(valid) > 0 {
if _, err := writer.WriteBatch(valid); err != nil {
f.l.WithError(err).
WithField("queue", i).
WithField("reason", reason).
Warn("Failed to write tun batch")
for _, pkt := range valid {
if pkt != nil {
f.writePacketToTun(i, pkt)
}
}
}
}
pending = pending[:0]
if flushTimer != nil {
if !timerFired {
if !flushTimer.Stop() {
select {
case <-flushTimer.C:
default:
}
}
}
flushTimer = nil
flushC = nil
}
}
armTimer := func() {
delay := f.currentBatchFlushInterval()
if delay <= 0 {
return
}
if flushTimer == nil {
flushTimer = time.NewTimer(delay)
flushC = flushTimer.C
}
}

for {
select {
case pkt := <-queue:
if pkt == nil {
continue
}
if f.ensurePacketHeadroom(&pkt, requiredHeadroom, i, "queue") {
pending = append(pending, pkt)
}
if len(pending) >= cap(pending) {
flush("cap", false)
continue
}
armTimer()
f.observeTunQueueLen(i)
case <-flushC:
flush("timer", true)
}
}
}

func (f *Interface) flushAndReleaseBatch(index int, writer udp.Conn, batch []queuedDatagram, reason string) {
if len(batch) == 0 {
return
}
f.flushDatagrams(index, writer, batch, reason)
for idx := range batch {
if batch[idx].packet != nil {
batch[idx].packet.Release()
batch[idx].packet = nil
}
}
if f.batchUDPFlushCounter != nil {
f.batchUDPFlushCounter.Inc(int64(len(batch)))
}
}

func (f *Interface) flushDatagrams(index int, writer udp.Conn, batch []queuedDatagram, reason string) {
if len(batch) == 0 {
return
}
if f.l.Level >= logrus.DebugLevel {
f.l.WithFields(logrus.Fields{
"writer": index,
"reason": reason,
"pending": len(batch),
}).Debug("udp batch flush summary")
}
maxSeg := f.effectiveGSOMaxSegments()
if bw, ok := writer.(udp.BatchConn); ok {
chunkCap := maxSeg
if chunkCap <= 0 {
chunkCap = len(batch)
}
chunk := make([]udp.Datagram, 0, chunkCap)
var (
currentAddr netip.AddrPort
segments int
)
flushChunk := func() {
if len(chunk) == 0 {
return
}
if f.l.Level >= logrus.DebugLevel {
f.l.WithFields(logrus.Fields{
"writer": index,
"segments": len(chunk),
"dest": chunk[0].Addr,
"reason": reason,
"pending_total": len(batch),
}).Debug("flushing UDP batch")
}
if err := bw.WriteBatch(chunk); err != nil {
f.l.WithError(err).
WithField("writer", index).
WithField("reason", reason).
Warn("Failed to write UDP batch")
}
chunk = chunk[:0]
segments = 0
}
for _, item := range batch {
if item.packet == nil || !item.addr.IsValid() {
continue
}
payload := item.packet.Payload()[:item.packet.Len]
if segments == 0 {
currentAddr = item.addr
}
if item.addr != currentAddr || (maxSeg > 0 && segments >= maxSeg) {
flushChunk()
currentAddr = item.addr
}
chunk = append(chunk, udp.Datagram{Payload: payload, Addr: item.addr})
segments++
}
flushChunk()
return
}
for _, item := range batch {
if item.packet == nil || !item.addr.IsValid() {
continue
}
if f.l.Level >= logrus.DebugLevel {
f.l.WithFields(logrus.Fields{
"writer": index,
"reason": reason,
"dest": item.addr,
"segments": 1,
}).Debug("flushing UDP batch")
}
if err := writer.WriteTo(item.packet.Payload()[:item.packet.Len], item.addr); err != nil {
f.l.WithError(err).
WithField("writer", index).
WithField("udpAddr", item.addr).
WithField("reason", reason).
Warn("Failed to write UDP packet")
}
}
}
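flushDatagrams cuts a GSO batch whenever the destination changes or the segment cap is reached. A standalone sketch of that grouping rule on simplified stand-in types (datagram and chunkByDest are illustrative, not names from this patch):

package main

import (
    "fmt"
    "net/netip"
)

type datagram struct {
    addr    netip.AddrPort
    payload []byte
}

// chunkByDest groups consecutive datagrams that share a destination, cutting a group
// whenever the destination changes or maxSeg segments have accumulated.
func chunkByDest(items []datagram, maxSeg int) [][]datagram {
    var out [][]datagram
    var cur []datagram
    var curAddr netip.AddrPort
    for _, d := range items {
        if len(cur) > 0 && (d.addr != curAddr || (maxSeg > 0 && len(cur) >= maxSeg)) {
            out = append(out, cur)
            cur = nil
        }
        if len(cur) == 0 {
            curAddr = d.addr
        }
        cur = append(cur, d)
    }
    if len(cur) > 0 {
        out = append(out, cur)
    }
    return out
}

func main() {
    a := netip.MustParseAddrPort("192.0.2.1:4242")
    b := netip.MustParseAddrPort("192.0.2.2:4242")
    batch := []datagram{{a, nil}, {a, nil}, {b, nil}, {b, nil}, {b, nil}}
    for _, c := range chunkByDest(batch, 2) {
        fmt.Println(len(c), "segment(s) to", c[0].addr)
    }
}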

func (f *Interface) tryQueueDatagram(q int, buf []byte, addr netip.AddrPort) bool {
if !addr.IsValid() || !f.batches.Enabled() {
return false
}
pkt := f.batches.newPacket()
if pkt == nil {
return false
}
payload := pkt.Payload()
if len(payload) < len(buf) {
pkt.Release()
return false
}
copy(payload, buf)
pkt.Len = len(buf)
if f.batches.enqueueTx(q, pkt, addr) {
f.observeUDPQueueLen(q)
return true
}
pkt.Release()
return false
}

func (f *Interface) writerForIndex(i int) udp.Conn {
if i < 0 || i >= len(f.writers) {
return nil
}
return f.writers[i]
}

func (f *Interface) writeImmediate(q int, buf []byte, addr netip.AddrPort, hostinfo *HostInfo) {
writer := f.writerForIndex(q)
if writer == nil {
f.l.WithField("udpAddr", addr).
WithField("writer", q).
Error("Failed to write outgoing packet: no writer available")
return
}
if err := writer.WriteTo(buf, addr); err != nil {
hostinfo.logger(f.l).
WithError(err).
WithField("udpAddr", addr).
Error("Failed to write outgoing packet")
}
}

func (f *Interface) tryQueuePacket(q int, pkt *overlay.Packet, addr netip.AddrPort) bool {
if pkt == nil || !addr.IsValid() || !f.batches.Enabled() {
return false
}
if f.batches.enqueueTx(q, pkt, addr) {
f.observeUDPQueueLen(q)
return true
}
return false
}

func (f *Interface) writeImmediatePacket(q int, pkt *overlay.Packet, addr netip.AddrPort, hostinfo *HostInfo) {
if pkt == nil {
return
}
writer := f.writerForIndex(q)
if writer == nil {
f.l.WithField("udpAddr", addr).
WithField("writer", q).
Error("Failed to write outgoing packet: no writer available")
pkt.Release()
return
}
if err := writer.WriteTo(pkt.Payload()[:pkt.Len], addr); err != nil {
hostinfo.logger(f.l).
WithError(err).
WithField("udpAddr", addr).
Error("Failed to write outgoing packet")
}
pkt.Release()
}

func (f *Interface) writePacketToTun(q int, pkt *overlay.Packet) {
if pkt == nil {
return
}
writer := f.readers[q]
if writer == nil {
pkt.Release()
return
}
if bw, ok := writer.(interface {
WriteBatch([]*overlay.Packet) (int, error)
}); ok {
if _, err := bw.WriteBatch([]*overlay.Packet{pkt}); err != nil {
f.l.WithError(err).WithField("queue", q).Warn("Failed to write tun packet via batch writer")
pkt.Release()
}
return
}
if _, err := writer.Write(pkt.Payload()[:pkt.Len]); err != nil {
f.l.WithError(err).Error("Failed to write to tun")
}
pkt.Release()
}

func (f *Interface) clonePacketWithHeadroom(pkt *overlay.Packet, required int) *overlay.Packet {
if pkt == nil {
return nil
}
payload := pkt.Payload()[:pkt.Len]
if len(payload) == 0 && required <= 0 {
return pkt
}

pool := f.batches.Pool()
if pool != nil {
if clone := pool.Get(); clone != nil {
if len(clone.Payload()) >= len(payload) {
clone.Len = copy(clone.Payload(), payload)
pkt.Release()
return clone
}
clone.Release()
}
}

if required < 0 {
required = 0
}
buf := make([]byte, required+len(payload))
n := copy(buf[required:], payload)
pkt.Release()
return &overlay.Packet{
Buf: buf,
Offset: required,
Len: n,
}
}

func (f *Interface) observeUDPQueueLen(i int) {
if f.batchUDPQueueGauge == nil {
return
}
f.batchUDPQueueGauge.Update(int64(f.batches.txQueueLen(i)))
}

func (f *Interface) observeTunQueueLen(i int) {
if f.batchTunQueueGauge == nil {
return
}
f.batchTunQueueGauge.Update(int64(f.batches.tunQueueLen(i)))
}

func (f *Interface) currentBatchFlushInterval() time.Duration {
if v := f.batchFlushInterval.Load(); v > 0 {
return time.Duration(v)
}
return 0
}

func (f *Interface) ensurePacketHeadroom(pkt **overlay.Packet, required int, queue int, reason string) bool {
p := *pkt
if p == nil {
return false
}
if required <= 0 || p.Offset >= required {
return true
}
clone := f.clonePacketWithHeadroom(p, required)
if clone == nil {
f.l.WithFields(logrus.Fields{
"queue": queue,
"reason": reason,
}).Warn("dropping packet lacking tun headroom")
return false
}
*pkt = clone
return true
}

func isVirtioHeadroomError(err error) bool {
if err == nil {
return false
}
msg := err.Error()
return strings.Contains(msg, "headroom") || strings.Contains(msg, "virtio")
}

func (f *Interface) effectiveGSOMaxSegments() int {
max := f.gsoMaxSegments
if max <= 0 {
max = defaultGSOMaxSegments
}
if max > maxKernelGSOSegments {
max = maxKernelGSOSegments
}
if !f.enableGSO {
return 1
}
return max
}

type udpOffloadConfigurator interface {
ConfigureOffload(enableGSO, enableGRO bool, maxSegments int)
}

func (f *Interface) applyOffloadConfig(enableGSO, enableGRO bool, maxSegments int) {
if maxSegments <= 0 {
maxSegments = defaultGSOMaxSegments
}
if maxSegments > maxKernelGSOSegments {
maxSegments = maxKernelGSOSegments
}
f.enableGSO = enableGSO
f.enableGRO = enableGRO
f.gsoMaxSegments = maxSegments
for _, writer := range f.writers {
if cfg, ok := writer.(udpOffloadConfigurator); ok {
cfg.ConfigureOffload(enableGSO, enableGRO, maxSegments)
}
}
}

func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
c.RegisterReloadCallback(f.reloadFirewall)
c.RegisterReloadCallback(f.reloadSendRecvError)
@@ -401,6 +1062,42 @@ func (f *Interface) reloadMisc(c *config.C) {
f.reQueryWait.Store(int64(n))
f.l.Info("timers.requery_wait_duration has changed")
}

if c.HasChanged("listen.gso_flush_timeout") {
d := c.GetDuration("listen.gso_flush_timeout", defaultGSOFlushInterval)
if d < 0 {
d = 0
}
f.batchFlushInterval.Store(int64(d))
f.l.WithField("duration", d).Info("listen.gso_flush_timeout has changed")
} else if c.HasChanged("batch.flush_interval") {
d := c.GetDuration("batch.flush_interval", defaultGSOFlushInterval)
if d < 0 {
d = 0
}
f.batchFlushInterval.Store(int64(d))
f.l.WithField("duration", d).Warn("batch.flush_interval is deprecated; use listen.gso_flush_timeout")
}

if c.HasChanged("batch.queue_depth") {
n := c.GetInt("batch.queue_depth", f.batchQueueDepth)
if n != f.batchQueueDepth {
f.batchQueueDepth = n
f.l.Warn("batch.queue_depth changes require a restart to take effect")
}
}

if c.HasChanged("listen.enable_gso") || c.HasChanged("listen.enable_gro") || c.HasChanged("listen.gso_max_segments") {
enableGSO := c.GetBool("listen.enable_gso", f.enableGSO)
enableGRO := c.GetBool("listen.enable_gro", f.enableGRO)
maxSeg := c.GetInt("listen.gso_max_segments", f.gsoMaxSegments)
f.applyOffloadConfig(enableGSO, enableGRO, maxSeg)
f.l.WithFields(logrus.Fields{
"enableGSO": enableGSO,
"enableGRO": enableGRO,
"gsoMaxSegments": maxSeg,
}).Info("listen GSO/GRO configuration updated")
}
}

func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
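applyOffloadConfig only pushes GSO/GRO settings to writers that satisfy the udpOffloadConfigurator interface it asserts against. A hypothetical writer showing the shape it looks for (offloadAwareConn is invented for illustration; the real writers live in the udp package):

package main

import "fmt"

// offloadAwareConn is a stand-in UDP writer used only to illustrate the interface
// that applyOffloadConfig type-asserts for; it is not a type from this patch.
type offloadAwareConn struct {
    enableGSO, enableGRO bool
    maxSegments          int
}

func (c *offloadAwareConn) ConfigureOffload(enableGSO, enableGRO bool, maxSegments int) {
    c.enableGSO = enableGSO
    c.enableGRO = enableGRO
    c.maxSegments = maxSegments
}

func main() {
    var w any = &offloadAwareConn{}
    // Mirrors the loop in applyOffloadConfig: settings reach only writers that opt in.
    if cfg, ok := w.(interface {
        ConfigureOffload(enableGSO, enableGRO bool, maxSegments int)
    }); ok {
        cfg.ConfigureOffload(true, true, 8)
    }
    fmt.Printf("%+v\n", w)
}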
249
lighthouse.go
@@ -9,6 +9,7 @@ import (
"net/netip"
"slices"
"strconv"
"sync"
"sync/atomic"
"time"

@@ -20,16 +21,16 @@ import (
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/udp"
"github.com/slackhq/nebula/util"
"github.com/wadey/synctrace"
)

var ErrHostNotKnown = errors.New("host not known")
var ErrBadDetailsVpnAddr = errors.New("invalid packet, malformed detailsVpnAddr")

type LightHouse struct {
//TODO: We need a timer wheel to kick out vpnAddrs that haven't reported in a long time
synctrace.RWMutex //Because we concurrently read and write to our maps
sync.RWMutex //Because we concurrently read and write to our maps
ctx context.Context
amLighthouse bool

myVpnNetworks []netip.Prefix
myVpnNetworksTable *bart.Lite
@@ -56,7 +57,7 @@ type LightHouse struct {
// staticList exists to avoid having a bool in each addrMap entry
// since static should be rare
staticList atomic.Pointer[map[netip.Addr]struct{}]
lighthouses atomic.Pointer[map[netip.Addr]struct{}]
lighthouses atomic.Pointer[[]netip.Addr]

interval atomic.Int64
updateCancel context.CancelFunc
@@ -96,7 +97,6 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
}

h := LightHouse{
RWMutex: synctrace.NewRWMutex("lighthouse"),
ctx: ctx,
amLighthouse: amLighthouse,
myVpnNetworks: cs.myVpnNetworks,
@@ -108,7 +108,7 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
queryChan: make(chan netip.Addr, c.GetUint32("handshakes.query_buffer", 64)),
l: l,
}
lighthouses := make(map[netip.Addr]struct{})
lighthouses := make([]netip.Addr, 0)
h.lighthouses.Store(&lighthouses)
staticList := make(map[netip.Addr]struct{})
h.staticList.Store(&staticList)
@@ -144,7 +144,7 @@ func (lh *LightHouse) GetStaticHostList() map[netip.Addr]struct{} {
return *lh.staticList.Load()
}

func (lh *LightHouse) GetLighthouses() map[netip.Addr]struct{} {
func (lh *LightHouse) GetLighthouses() []netip.Addr {
return *lh.lighthouses.Load()
}

@@ -307,13 +307,12 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
}

if initial || c.HasChanged("lighthouse.hosts") {
lhMap := make(map[netip.Addr]struct{})
lhList, err := lh.parseLighthouses(c)
err := lh.parseLighthouses(c, lhMap)
if err != nil {
return err
}

lh.lighthouses.Store(&lhMap)
lh.lighthouses.Store(&lhList)
if !initial {
//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
lh.l.Info("lighthouse.hosts has changed")
@@ -347,36 +346,37 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
return nil
}

func (lh *LightHouse) parseLighthouses(c *config.C, lhMap map[netip.Addr]struct{}) error {
func (lh *LightHouse) parseLighthouses(c *config.C) ([]netip.Addr, error) {
lhs := c.GetStringSlice("lighthouse.hosts", []string{})
if lh.amLighthouse && len(lhs) != 0 {
lh.l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
}
out := make([]netip.Addr, len(lhs))

for i, host := range lhs {
addr, err := netip.ParseAddr(host)
if err != nil {
return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
return nil, util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
}

if !lh.myVpnNetworksTable.Contains(addr) {
return util.NewContextualError("lighthouse host is not in our networks, invalid", m{"vpnAddr": addr, "networks": lh.myVpnNetworks}, nil)
return nil, util.NewContextualError("lighthouse host is not in our networks, invalid", m{"vpnAddr": addr, "networks": lh.myVpnNetworks}, nil)
}
lhMap[addr] = struct{}{}
out[i] = addr
}

if !lh.amLighthouse && len(lhMap) == 0 {
if !lh.amLighthouse && len(out) == 0 {
lh.l.Warn("No lighthouse.hosts configured, this host will only be able to initiate tunnels with static_host_map entries")
}

staticList := lh.GetStaticHostList()
for lhAddr, _ := range lhMap {
for i := range out {
if _, ok := staticList[lhAddr]; !ok {
if _, ok := staticList[out[i]]; !ok {
return fmt.Errorf("lighthouse %s does not have a static_host_map entry", lhAddr)
return nil, fmt.Errorf("lighthouse %s does not have a static_host_map entry", out[i])
}
}

return nil
return out, nil
}

func getStaticMapCadence(c *config.C) (time.Duration, error) {
@@ -473,7 +473,6 @@ func (lh *LightHouse) QueryServer(vpnAddr netip.Addr) {
return
}

synctrace.ChanDebugSend("lighthouse-querychan")
lh.queryChan <- vpnAddr
}

@@ -488,7 +487,7 @@ func (lh *LightHouse) QueryCache(vpnAddrs []netip.Addr) *RemoteList {
lh.Lock()
defer lh.Unlock()
// Add an entry if we don't already have one
return lh.unlockedGetRemoteList(vpnAddrs)
return lh.unlockedGetRemoteList(vpnAddrs) //todo CERT-V2 this contains addrmap lookups we could potentially skip
}

// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
@@ -521,11 +520,15 @@ func (lh *LightHouse) queryAndPrepMessage(vpnAddr netip.Addr, f func(*cache) (in
}

func (lh *LightHouse) DeleteVpnAddrs(allVpnAddrs []netip.Addr) {
// First we check the static mapping
// First we check the static host map. If any of the VpnAddrs to be deleted are present, do nothing.
// and do nothing if it is there
staticList := lh.GetStaticHostList()
if _, ok := lh.GetStaticHostList()[allVpnAddrs[0]]; ok {
for _, addr := range allVpnAddrs {
return
if _, ok := staticList[addr]; ok {
return
}
}

// None of the VpnAddrs were present. Now we can do the deletes.
lh.Lock()
rm, ok := lh.addrMap[allVpnAddrs[0]]
if ok {
@@ -567,7 +570,7 @@ func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, t
am.unlockedSetHostnamesResults(hr)

for _, addrPort := range hr.GetAddrs() {
if !lh.shouldAdd(vpnAddr, addrPort.Addr()) {
if !lh.shouldAdd([]netip.Addr{vpnAddr}, addrPort.Addr()) {
continue
}
switch {
@@ -629,23 +632,30 @@ func (lh *LightHouse) addCalculatedRemotes(vpnAddr netip.Addr) bool {
return len(calculatedV4) > 0 || len(calculatedV6) > 0
}

// unlockedGetRemoteList
// unlockedGetRemoteList assumes you have the lh lock
// assumes you have the lh lock
func (lh *LightHouse) unlockedGetRemoteList(allAddrs []netip.Addr) *RemoteList {
am, ok := lh.addrMap[allAddrs[0]]
// before we go and make a new remotelist, we need to make sure we don't have one for any of this set of vpnaddrs yet
if !ok {
for i, addr := range allAddrs {
am = NewRemoteList(allAddrs, func(a netip.Addr) bool { return lh.shouldAdd(allAddrs[0], a) })
am, ok := lh.addrMap[addr]
for _, addr := range allAddrs {
if ok {
lh.addrMap[addr] = am
if i != 0 {
lh.addrMap[allAddrs[0]] = am
}
return am
}
}

am := NewRemoteList(allAddrs, lh.shouldAdd)
for _, addr := range allAddrs {
lh.addrMap[addr] = am
}
return am
}

func (lh *LightHouse) shouldAdd(vpnAddr netip.Addr, to netip.Addr) bool {
func (lh *LightHouse) shouldAdd(vpnAddrs []netip.Addr, to netip.Addr) bool {
allow := lh.GetRemoteAllowList().Allow(vpnAddr, to)
allow := lh.GetRemoteAllowList().AllowAll(vpnAddrs, to)
if lh.l.Level >= logrus.TraceLevel {
lh.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", to).WithField("allow", allow).
lh.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", to).WithField("allow", allow).
Trace("remoteAllowList.Allow")
}
if !allow {
@@ -700,19 +710,22 @@ func (lh *LightHouse) unlockedShouldAddV6(vpnAddr netip.Addr, to *V6AddrPort) bo
}

func (lh *LightHouse) IsLighthouseAddr(vpnAddr netip.Addr) bool {
if _, ok := lh.GetLighthouses()[vpnAddr]; ok {
l := lh.GetLighthouses()
return true
for i := range l {
if l[i] == vpnAddr {
return true
}
}
return false
}

// TODO: CERT-V2 IsLighthouseAddr should be sufficient, we just need to update the vpnAddrs for lighthouses after a handshake
func (lh *LightHouse) IsAnyLighthouseAddr(vpnAddrs []netip.Addr) bool {
// so that we know all the lighthouse vpnAddrs, not just the ones we were configured to talk to initially
func (lh *LightHouse) IsAnyLighthouseAddr(vpnAddr []netip.Addr) bool {
l := lh.GetLighthouses()
for _, a := range vpnAddr {
for i := range vpnAddrs {
if _, ok := l[a]; ok {
for j := range l {
return true
if l[j] == vpnAddrs[i] {
return true
}
}
}
return false
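With the configured lighthouses stored as a slice instead of a map, membership checks become the linear scans shown above; for the handful of lighthouse addresses a node typically has, that is cheap and avoids per-reload map allocation. A self-contained sketch of the same check (the addresses below are made up for illustration):

package main

import (
    "fmt"
    "net/netip"
)

// isLighthouseAddr mirrors the scan IsLighthouseAddr now performs over the slice.
func isLighthouseAddr(lighthouses []netip.Addr, vpnAddr netip.Addr) bool {
    for i := range lighthouses {
        if lighthouses[i] == vpnAddr {
            return true
        }
    }
    return false
}

func main() {
    lhs := []netip.Addr{netip.MustParseAddr("10.128.0.2")}
    fmt.Println(isLighthouseAddr(lhs, netip.MustParseAddr("10.128.0.2"))) // true
    fmt.Println(isLighthouseAddr(lhs, netip.MustParseAddr("10.128.0.9"))) // false
}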
@@ -727,11 +740,9 @@ func (lh *LightHouse) startQueryWorker() {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)

synctrace.ChanDebugRecvLock("lighthouse-querychan")
for {
select {
case <-lh.ctx.Done():
synctrace.ChanDebugRecvUnlock("lighthouse-querychan")
return
case addr := <-lh.queryChan:
lh.innerQueryServer(addr, nb, out)
@@ -756,7 +767,7 @@ func (lh *LightHouse) innerQueryServer(addr netip.Addr, nb, out []byte) {
queried := 0
lighthouses := lh.GetLighthouses()

for lhVpnAddr := range lighthouses {
for _, lhVpnAddr := range lighthouses {
hi := lh.ifce.GetHostInfo(lhVpnAddr)
if hi != nil {
v = hi.ConnectionState.myCert.Version()
@@ -874,7 +885,7 @@ func (lh *LightHouse) SendUpdate() {
updated := 0
lighthouses := lh.GetLighthouses()

for lhVpnAddr := range lighthouses {
for _, lhVpnAddr := range lighthouses {
var v cert.Version
hi := lh.ifce.GetHostInfo(lhVpnAddr)
if hi != nil {
@@ -932,7 +943,6 @@ func (lh *LightHouse) SendUpdate() {
V4AddrPorts: v4,
V6AddrPorts: v6,
RelayVpnAddrs: relays,
VpnAddr: netAddrToProtoAddr(lh.myVpnNetworks[0].Addr()),
},
}

@@ -1052,19 +1062,19 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, fromVpnAddrs []neti
return
}

useVersion := cert.Version1
queryVpnAddr, useVersion, err := n.Details.GetVpnAddrAndVersion()
var queryVpnAddr netip.Addr
if err != nil {
if n.Details.OldVpnAddr != 0 {
b := [4]byte{}
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
queryVpnAddr = netip.AddrFrom4(b)
useVersion = 1
} else if n.Details.VpnAddr != nil {
queryVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
useVersion = 2
} else {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.WithField("from", fromVpnAddrs).WithField("details", n.Details).Debugln("Dropping malformed HostQuery")
lhh.l.WithField("from", fromVpnAddrs).WithField("details", n.Details).
Debugln("Dropping malformed HostQuery")
}
return
}
if useVersion == cert.Version1 && queryVpnAddr.Is6() {
// this case really shouldn't be possible to represent, but reject it anyway.
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.WithField("vpnAddrs", fromVpnAddrs).WithField("queryVpnAddr", queryVpnAddr).
Debugln("invalid vpn addr for v1 handleHostQuery")
}
return
}
@@ -1073,9 +1083,6 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, fromVpnAddrs []neti
n = lhh.resetMeta()
n.Type = NebulaMeta_HostQueryReply
if useVersion == cert.Version1 {
if !queryVpnAddr.Is4() {
return 0, fmt.Errorf("invalid vpn addr for v1 handleHostQuery")
}
b := queryVpnAddr.As4()
n.Details.OldVpnAddr = binary.BigEndian.Uint32(b[:])
} else {
@@ -1120,8 +1127,9 @@ func (lhh *LightHouseHandler) sendHostPunchNotification(n *NebulaMeta, fromVpnAd
if ok {
whereToPunch = newDest
} else {
//TODO: CERT-V2 this means the destination will have no addresses in common with the punch-ee
if lhh.l.Level >= logrus.DebugLevel {
//choosing to do nothing for now, but maybe we return an error?
lhh.l.WithField("to", crt.Networks()).Debugln("unable to punch to host, no addresses in common")
}
}
}

@@ -1180,19 +1188,17 @@ func (lhh *LightHouseHandler) coalesceAnswers(v cert.Version, c *cache, n *Nebul
if !r.Is4() {
continue
}

b = r.As4()
n.Details.OldRelayVpnAddrs = append(n.Details.OldRelayVpnAddrs, binary.BigEndian.Uint32(b[:]))
}

} else if v == cert.Version2 {
for _, r := range c.relay.relay {
n.Details.RelayVpnAddrs = append(n.Details.RelayVpnAddrs, netAddrToProtoAddr(r))
}

} else {
//TODO: CERT-V2 don't panic
if lhh.l.Level >= logrus.DebugLevel {
panic("unsupported version")
lhh.l.WithField("version", v).Debug("unsupported protocol version")
}
}
}
}
@@ -1202,18 +1208,16 @@ func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, fromVpnAddrs [
return
}

lhh.lh.Lock()
certVpnAddr, _, err := n.Details.GetVpnAddrAndVersion()
if err != nil {
var certVpnAddr netip.Addr
if lhh.l.Level >= logrus.DebugLevel {
if n.Details.OldVpnAddr != 0 {
lhh.l.WithError(err).WithField("vpnAddrs", fromVpnAddrs).Error("dropping malformed HostQueryReply")
b := [4]byte{}
}
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
return
certVpnAddr = netip.AddrFrom4(b)
} else if n.Details.VpnAddr != nil {
certVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
}
relays := n.Details.GetRelays()

lhh.lh.Lock()
am := lhh.lh.unlockedGetRemoteList([]netip.Addr{certVpnAddr})
am.Lock()
lhh.lh.Unlock()
@@ -1238,27 +1242,24 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, fromVp
return
}

// not using GetVpnAddrAndVersion because we don't want to error on a blank detailsVpnAddr
var detailsVpnAddr netip.Addr
useVersion := cert.Version1
var useVersion cert.Version
if n.Details.OldVpnAddr != 0 {
if n.Details.OldVpnAddr != 0 { //v1 always sets this field
b := [4]byte{}
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
detailsVpnAddr = netip.AddrFrom4(b)
useVersion = cert.Version1
} else if n.Details.VpnAddr != nil {
} else if n.Details.VpnAddr != nil { //this field is "optional" in v2, but if it's set, we should enforce it
detailsVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
useVersion = cert.Version2
} else {
if lhh.l.Level >= logrus.DebugLevel {
detailsVpnAddr = netip.Addr{}
lhh.l.WithField("details", n.Details).Debugf("dropping invalid HostUpdateNotification")
useVersion = cert.Version2
}
return
}

//TODO: CERT-V2 hosts with only v2 certs cannot provide their ipv6 addr when contacting the lighthouse via v4?
//Simple check that the host sent this not someone else, if detailsVpnAddr is filled
//TODO: CERT-V2 why do we care about the vpnAddr in the packet? We know where it came from, right?
if detailsVpnAddr.IsValid() && !slices.Contains(fromVpnAddrs, detailsVpnAddr) {
//Simple check that the host sent this not someone else
if !slices.Contains(fromVpnAddrs, detailsVpnAddr) {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.WithField("vpnAddrs", fromVpnAddrs).WithField("answer", detailsVpnAddr).Debugln("Host sent invalid update")
}
@@ -1272,24 +1273,24 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, fromVp
am.Lock()
lhh.lh.Unlock()

am.unlockedSetV4(fromVpnAddrs[0], detailsVpnAddr, n.Details.V4AddrPorts, lhh.lh.unlockedShouldAddV4)
am.unlockedSetV4(fromVpnAddrs[0], fromVpnAddrs[0], n.Details.V4AddrPorts, lhh.lh.unlockedShouldAddV4)
am.unlockedSetV6(fromVpnAddrs[0], detailsVpnAddr, n.Details.V6AddrPorts, lhh.lh.unlockedShouldAddV6)
am.unlockedSetV6(fromVpnAddrs[0], fromVpnAddrs[0], n.Details.V6AddrPorts, lhh.lh.unlockedShouldAddV6)
am.unlockedSetRelay(fromVpnAddrs[0], relays)
am.Unlock()

n = lhh.resetMeta()
n.Type = NebulaMeta_HostUpdateNotificationAck
switch useVersion {
if useVersion == cert.Version1 {
case cert.Version1:
if !fromVpnAddrs[0].Is4() {
lhh.l.WithField("vpnAddrs", fromVpnAddrs).Error("Can not send HostUpdateNotificationAck for a ipv6 vpn ip in a v1 message")
return
}
vpnAddrB := fromVpnAddrs[0].As4()
n.Details.OldVpnAddr = binary.BigEndian.Uint32(vpnAddrB[:])
} else if useVersion == cert.Version2 {
case cert.Version2:
n.Details.VpnAddr = netAddrToProtoAddr(fromVpnAddrs[0])
// do nothing, we want to send a blank message
} else {
default:
lhh.l.WithField("useVersion", useVersion).Error("invalid protocol version")
return
}
@@ -1307,13 +1308,20 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, fromVp
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, fromVpnAddrs []netip.Addr, w EncWriter) {
//It's possible the lighthouse is communicating with us using a non primary vpn addr,
//which means we need to compare all fromVpnAddrs against all configured lighthouse vpn addrs.
//maybe one day we'll have a better idea, if it matters.
if !lhh.lh.IsAnyLighthouseAddr(fromVpnAddrs) {
return
}

detailsVpnAddr, _, err := n.Details.GetVpnAddrAndVersion()
if err != nil {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.WithField("details", n.Details).WithError(err).Debugln("dropping invalid HostPunchNotification")
}
return
}

empty := []byte{0}
punch := func(vpnPeer netip.AddrPort) {
punch := func(vpnPeer netip.AddrPort, logVpnAddr netip.Addr) {
if !vpnPeer.IsValid() {
return
}
@@ -1325,48 +1333,31 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, fromVpn
}()

if lhh.l.Level >= logrus.DebugLevel {
var logVpnAddr netip.Addr
if n.Details.OldVpnAddr != 0 {
b := [4]byte{}
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
logVpnAddr = netip.AddrFrom4(b)
} else if n.Details.VpnAddr != nil {
logVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
}
lhh.l.Debugf("Punching on %v for %v", vpnPeer, logVpnAddr)
}
}

for _, a := range n.Details.V4AddrPorts {
punch(protoV4AddrPortToNetAddrPort(a))
punch(protoV4AddrPortToNetAddrPort(a), detailsVpnAddr)
}

for _, a := range n.Details.V6AddrPorts {
punch(protoV6AddrPortToNetAddrPort(a))
punch(protoV6AddrPortToNetAddrPort(a), detailsVpnAddr)
}

// This sends a nebula test packet to the host trying to contact us. In the case
// of a double nat or other difficult scenario, this may help establish
// a tunnel.
if lhh.lh.punchy.GetRespond() {
var queryVpnAddr netip.Addr
if n.Details.OldVpnAddr != 0 {
b := [4]byte{}
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
queryVpnAddr = netip.AddrFrom4(b)
} else if n.Details.VpnAddr != nil {
queryVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
}

go func() {
time.Sleep(lhh.lh.punchy.GetRespondDelay())
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.Debugf("Sending a nebula test packet to vpn addr %s", queryVpnAddr)
lhh.l.Debugf("Sending a nebula test packet to vpn addr %s", detailsVpnAddr)
}
//NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine
// for each punchBack packet. We should move this into a timerwheel or a single goroutine
// managed by a channel.
w.SendMessageToVpnAddr(header.Test, header.TestRequest, queryVpnAddr, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
w.SendMessageToVpnAddr(header.Test, header.TestRequest, detailsVpnAddr, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}()
}
}
@@ -1445,3 +1436,17 @@ func findNetworkUnion(prefixes []netip.Prefix, addrs []netip.Addr) (netip.Addr,
}
return netip.Addr{}, false
}

func (d *NebulaMetaDetails) GetVpnAddrAndVersion() (netip.Addr, cert.Version, error) {
if d.OldVpnAddr != 0 {
b := [4]byte{}
binary.BigEndian.PutUint32(b[:], d.OldVpnAddr)
detailsVpnAddr := netip.AddrFrom4(b)
return detailsVpnAddr, cert.Version1, nil
} else if d.VpnAddr != nil {
detailsVpnAddr := protoAddrToNetAddr(d.VpnAddr)
return detailsVpnAddr, cert.Version2, nil
} else {
return netip.Addr{}, cert.Version1, ErrBadDetailsVpnAddr
}
}
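GetVpnAddrAndVersion centralizes the v1/v2 address decoding that several handlers previously open-coded: OldVpnAddr (always set by v1) wins, then VpnAddr (optional in v2), otherwise the message is malformed. A sketch of that same decode rule on a stand-in struct, since the real NebulaMetaDetails is protobuf-generated and the version is simplified to an int here:

package main

import (
    "encoding/binary"
    "errors"
    "fmt"
    "net/netip"
)

// details is a stand-in with only the two fields the helper inspects.
type details struct {
    OldVpnAddr uint32      // v1 messages always set this
    VpnAddr    *netip.Addr // v2 messages may set this
}

var errBadDetailsVpnAddr = errors.New("invalid packet, malformed detailsVpnAddr")

func vpnAddrAndVersion(d details) (netip.Addr, int, error) {
    if d.OldVpnAddr != 0 {
        var b [4]byte
        binary.BigEndian.PutUint32(b[:], d.OldVpnAddr)
        return netip.AddrFrom4(b), 1, nil
    }
    if d.VpnAddr != nil {
        return *d.VpnAddr, 2, nil
    }
    return netip.Addr{}, 1, errBadDetailsVpnAddr
}

func main() {
    addr, v, err := vpnAddrAndVersion(details{OldVpnAddr: 0x0a800001})
    fmt.Println(addr, v, err) // 10.128.0.1 1 <nil>
    _, _, err = vpnAddrAndVersion(details{})
    fmt.Println(err) // invalid packet, malformed detailsVpnAddr
}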
@@ -493,3 +493,123 @@ func Test_findNetworkUnion(t *testing.T) {
out, ok = findNetworkUnion([]netip.Prefix{fc00}, []netip.Addr{a1, afe81})
assert.False(t, ok)
}

func TestLighthouse_Dont_Delete_Static_Hosts(t *testing.T) {
l := test.NewLogger()

myUdpAddr2 := netip.MustParseAddrPort("1.2.3.4:4242")

testSameHostNotStatic := netip.MustParseAddr("10.128.0.41")
testStaticHost := netip.MustParseAddr("10.128.0.42")
//myVpnIp := netip.MustParseAddr("10.128.0.2")

c := config.NewC(l)
lh1 := "10.128.0.2"
c.Settings["lighthouse"] = map[string]any{
"hosts": []any{lh1},
"interval": "1s",
}

c.Settings["listen"] = map[string]any{"port": 4242}
c.Settings["static_host_map"] = map[string]any{
lh1: []any{"1.1.1.1:4242"},
"10.128.0.42": []any{"1.2.3.4:4242"},
}

myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
}
lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
require.NoError(t, err)
lh.ifce = &mockEncWriter{}

//test that we actually have the static entry:
out := lh.Query(testStaticHost)
assert.NotNil(t, out)
assert.Equal(t, out.vpnAddrs[0], testStaticHost)
out.Rebuild([]netip.Prefix{}) //why tho
assert.Equal(t, out.addrs[0], myUdpAddr2)

//bolt on a lower numbered primary IP
am := lh.unlockedGetRemoteList([]netip.Addr{testStaticHost})
am.vpnAddrs = []netip.Addr{testSameHostNotStatic, testStaticHost}
lh.addrMap[testSameHostNotStatic] = am
out.Rebuild([]netip.Prefix{}) //???

//test that we actually have the static entry:
out = lh.Query(testStaticHost)
assert.NotNil(t, out)
assert.Equal(t, out.vpnAddrs[0], testSameHostNotStatic)
assert.Equal(t, out.vpnAddrs[1], testStaticHost)
assert.Equal(t, out.addrs[0], myUdpAddr2)

//test that we actually have the static entry for BOTH:
out2 := lh.Query(testSameHostNotStatic)
assert.Same(t, out2, out)

//now do the delete
lh.DeleteVpnAddrs([]netip.Addr{testSameHostNotStatic, testStaticHost})
//verify
out = lh.Query(testSameHostNotStatic)
assert.NotNil(t, out)
if out == nil {
t.Fatal("expected non-nil query for the static host")
}
assert.Equal(t, out.vpnAddrs[0], testSameHostNotStatic)
assert.Equal(t, out.vpnAddrs[1], testStaticHost)
assert.Equal(t, out.addrs[0], myUdpAddr2)
}

func TestLighthouse_DeletesWork(t *testing.T) {
l := test.NewLogger()

myUdpAddr2 := netip.MustParseAddrPort("1.2.3.4:4242")
testHost := netip.MustParseAddr("10.128.0.42")

c := config.NewC(l)
lh1 := "10.128.0.2"
c.Settings["lighthouse"] = map[string]any{
"hosts": []any{lh1},
"interval": "1s",
}

c.Settings["listen"] = map[string]any{"port": 4242}
c.Settings["static_host_map"] = map[string]any{
lh1: []any{"1.1.1.1:4242"},
}

myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
}
lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
require.NoError(t, err)
lh.ifce = &mockEncWriter{}

//insert the host
am := lh.unlockedGetRemoteList([]netip.Addr{testHost})
am.vpnAddrs = []netip.Addr{testHost}
am.addrs = []netip.AddrPort{myUdpAddr2}
lh.addrMap[testHost] = am
am.Rebuild([]netip.Prefix{}) //???

//test that we actually have the entry:
out := lh.Query(testHost)
assert.NotNil(t, out)
assert.Equal(t, out.vpnAddrs[0], testHost)
out.Rebuild([]netip.Prefix{}) //why tho
assert.Equal(t, out.addrs[0], myUdpAddr2)

//now do the delete
lh.DeleteVpnAddrs([]netip.Addr{testHost})
//verify
out = lh.Query(testHost)
assert.Nil(t, out)
}
82
main.go
@@ -5,6 +5,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
|
"runtime"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
@@ -143,6 +144,20 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
|
|||||||
// set up our UDP listener
|
// set up our UDP listener
|
||||||
udpConns := make([]udp.Conn, routines)
|
udpConns := make([]udp.Conn, routines)
|
||||||
port := c.GetInt("listen.port", 0)
|
port := c.GetInt("listen.port", 0)
|
||||||
|
enableGSO := c.GetBool("listen.enable_gso", true)
|
||||||
|
enableGRO := c.GetBool("listen.enable_gro", true)
|
||||||
|
gsoMaxSegments := c.GetInt("listen.gso_max_segments", defaultGSOMaxSegments)
|
||||||
|
if gsoMaxSegments <= 0 {
|
||||||
|
gsoMaxSegments = defaultGSOMaxSegments
|
||||||
|
}
|
||||||
|
if gsoMaxSegments > maxKernelGSOSegments {
|
||||||
|
gsoMaxSegments = maxKernelGSOSegments
|
||||||
|
}
|
||||||
|
gsoFlushTimeout := c.GetDuration("listen.gso_flush_timeout", defaultGSOFlushInterval)
|
||||||
|
if gsoFlushTimeout < 0 {
|
||||||
|
gsoFlushTimeout = 0
|
||||||
|
}
|
||||||
|
batchQueueDepth := c.GetInt("batch.queue_depth", 0)
|
||||||
|
|
||||||
if !configTest {
|
if !configTest {
|
||||||
rawListenHost := c.GetString("listen.host", "0.0.0.0")
|
rawListenHost := c.GetString("listen.host", "0.0.0.0")
|
||||||
@@ -162,13 +177,27 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 			listenHost = ips[0].Unmap()
 		}
 
+		useWGDefault := runtime.GOOS == "linux"
+		useWG := c.GetBool("listen.use_wireguard_stack", useWGDefault)
+		var mkListener func(*logrus.Logger, netip.Addr, int, bool, int) (udp.Conn, error)
+		if useWG {
+			mkListener = udp.NewWireguardListener
+		} else {
+			mkListener = udp.NewListener
+		}
+
 		for i := 0; i < routines; i++ {
 			l.Infof("listening on %v", netip.AddrPortFrom(listenHost, uint16(port)))
-			udpServer, err := udp.NewListener(l, listenHost, port, routines > 1, c.GetInt("listen.batch", 64))
+			udpServer, err := mkListener(l, listenHost, port, routines > 1, c.GetInt("listen.batch", 64))
 			if err != nil {
 				return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
 			}
 			udpServer.ReloadConfig(c)
+			if cfg, ok := udpServer.(interface {
+				ConfigureOffload(bool, bool, int)
+			}); ok {
+				cfg.ConfigureOffload(enableGSO, enableGRO, gsoMaxSegments)
+			}
 			udpConns[i] = udpServer
 
 			// If port is dynamic, discover it before the next pass through the for loop
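The `ConfigureOffload` wiring above relies on a plain Go idiom: assert the concrete listener against a small anonymous interface and only call the method when the assertion succeeds, so listeners that do not support offload tuning are left untouched. Below is a minimal standalone sketch of that pattern; the `Conn`, `offloadConn`, and `plainConn` types are hypothetical stand-ins, not Nebula's own types.

```go
package main

import "fmt"

// Conn is a stand-in for a generic listener interface.
type Conn interface{ Close() error }

// offloadConn is a hypothetical implementation that supports offload tuning.
type offloadConn struct{}

func (offloadConn) Close() error { return nil }
func (offloadConn) ConfigureOffload(gso, gro bool, segs int) {
	fmt.Println("offload configured:", gso, gro, segs)
}

// plainConn is a hypothetical implementation without the optional method.
type plainConn struct{}

func (plainConn) Close() error { return nil }

func main() {
	conns := []Conn{offloadConn{}, plainConn{}}
	for _, c := range conns {
		// Only configure offload when the concrete type opts in.
		if cfg, ok := c.(interface{ ConfigureOffload(bool, bool, int) }); ok {
			cfg.ConfigureOffload(true, true, 32)
		}
	}
}
```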
@@ -185,6 +214,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 
 	hostMap := NewHostMapFromConfig(l, c)
 	punchy := NewPunchyFromConfig(l, c)
+	connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
 	lightHouse, err := NewLightHouseFromConfig(ctx, l, c, pki.getCertState(), udpConns[0], punchy)
 	if err != nil {
 		return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
@@ -220,32 +250,32 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 		}
 	}
 
-	checkInterval := c.GetInt("timers.connection_alive_interval", 5)
-	pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
-
 	ifConfig := &InterfaceConfig{
 		HostMap:                 hostMap,
 		Inside:                  tun,
 		Outside:                 udpConns[0],
 		pki:                     pki,
 		Firewall:                fw,
 		ServeDns:                serveDns,
 		HandshakeManager:        handshakeManager,
+		connectionManager:       connManager,
 		lightHouse:              lightHouse,
-		checkInterval:           time.Second * time.Duration(checkInterval),
-		pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
 		tryPromoteEvery:         c.GetUint32("counters.try_promote", defaultPromoteEvery),
 		reQueryEvery:            c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
 		reQueryWait:             c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
 		DropLocalBroadcast:      c.GetBool("tun.drop_local_broadcast", false),
 		DropMulticast:           c.GetBool("tun.drop_multicast", false),
+		EnableGSO:               enableGSO,
+		EnableGRO:               enableGRO,
+		GSOMaxSegments:          gsoMaxSegments,
 		routines:                routines,
 		MessageMetrics:          messageMetrics,
 		version:                 buildVersion,
 		relayManager:            NewRelayManager(ctx, l, hostMap, c),
 		punchy:                  punchy,
 		ConntrackCacheTimeout:   conntrackCacheTimeout,
+		BatchFlushInterval:      gsoFlushTimeout,
+		BatchQueueDepth:         batchQueueDepth,
 		l:                       l,
 	}
 
@@ -257,6 +287,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 	}
 
 	ifce.writers = udpConns
+	ifce.applyOffloadConfig(enableGSO, enableGRO, gsoMaxSegments)
 	lightHouse.ifce = ifce
 
 	ifce.RegisterConfigChangeCallbacks(c)
@@ -296,5 +327,6 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 		statsStart,
 		dnsStart,
 		lightHouse.StartUpdateWorker,
+		connManager.Start,
 	}, nil
 }

outside.go (71 changed lines)
@@ -12,6 +13,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/firewall"
 	"github.com/slackhq/nebula/header"
+	"github.com/slackhq/nebula/overlay"
 	"golang.org/x/net/ipv4"
 )
 
@@ -19,7 +20,7 @@ const (
 	minFwPacketLen = 4
 )
 
-func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf *LightHouseHandler, nb []byte, q int, localCache firewall.ConntrackCache) {
+func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf *LightHouseHandler, nb []byte, q int, localCache *firewall.ConntrackCache) {
 	err := h.Parse(packet)
 	if err != nil {
 		// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
@@ -61,7 +62,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 
 		switch h.Subtype {
 		case header.MessageNone:
-			if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache) {
+			if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache, ip, h.RemoteIndex) {
 				return
 			}
 		case header.MessageRelay:
@@ -81,7 +82,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 			// Pull the Roaming parts up here, and return in all call paths.
 			f.handleHostRoaming(hostinfo, ip)
 			// Track usage of both the HostInfo and the Relay for the received & authenticated packet
-			f.connectionManager.In(hostinfo.localIndexId)
+			f.connectionManager.In(hostinfo)
 			f.connectionManager.RelayUsed(h.RemoteIndex)
 
 			relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
@@ -213,7 +214,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 
 	f.handleHostRoaming(hostinfo, ip)
 
-	f.connectionManager.In(hostinfo.localIndexId)
+	f.connectionManager.In(hostinfo)
 }
 
 // closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
@@ -254,16 +255,18 @@ func (f *Interface) handleHostRoaming(hostinfo *HostInfo, udpAddr netip.AddrPort
 
 }
 
+// handleEncrypted returns true if a packet should be processed, false otherwise
 func (f *Interface) handleEncrypted(ci *ConnectionState, addr netip.AddrPort, h *header.H) bool {
-	// If connectionstate exists and the replay protector allows, process packet
-	// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
-	if ci == nil || !ci.window.Check(f.l, h.MessageCounter) {
+	// If connectionstate does not exist, send a recv error, if possible, to encourage a fast reconnect
+	if ci == nil {
 		if addr.IsValid() {
 			f.maybeSendRecvError(addr, h.RemoteIndex)
-			return false
-		} else {
-			return false
 		}
+		return false
+	}
+	// If the window check fails, refuse to process the packet, but don't send a recv error
+	if !ci.window.Check(f.l, h.MessageCounter) {
+		return false
 	}
 
 	return true
@@ -463,23 +466,45 @@ func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []
 	return out, nil
 }
 
-func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) bool {
-	var err error
+func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache *firewall.ConntrackCache, addr netip.AddrPort, recvIndex uint32) bool {
+	var (
+		err error
+		pkt *overlay.Packet
+	)
+
+	if f.batches.tunQueue(q) != nil {
+		pkt = f.batches.newPacket()
+		if pkt != nil {
+			out = pkt.Payload()[:0]
+		}
+	}
 
 	out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
 	if err != nil {
+		if pkt != nil {
+			pkt.Release()
+		}
 		hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
+		if addr.IsValid() {
+			f.maybeSendRecvError(addr, recvIndex)
+		}
 		return false
 	}
 
 	err = newPacket(out, true, fwPacket)
 	if err != nil {
+		if pkt != nil {
+			pkt.Release()
+		}
 		hostinfo.logger(f.l).WithError(err).WithField("packet", out).
 			Warnf("Error while validating inbound packet")
 		return false
 	}
 
 	if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
+		if pkt != nil {
+			pkt.Release()
+		}
 		hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
 			Debugln("dropping out of window packet")
 		return false
@@ -487,6 +512,9 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
 
 	dropReason := f.firewall.Drop(*fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache)
 	if dropReason != nil {
+		if pkt != nil {
+			pkt.Release()
+		}
 		// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
 		// This gives us a buffer to build the reject packet in
 		f.rejectOutside(out, hostinfo.ConnectionState, hostinfo, nb, packet, q)
@@ -498,9 +526,18 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
 		return false
 	}
 
-	f.connectionManager.In(hostinfo.localIndexId)
-	_, err = f.readers[q].Write(out)
-	if err != nil {
+	f.connectionManager.In(hostinfo)
+	if pkt != nil {
+		pkt.Len = len(out)
+		if f.batches.enqueueTun(q, pkt) {
+			f.observeTunQueueLen(q)
+			return true
+		}
+		f.writePacketToTun(q, pkt)
+		return true
+	}
+
+	if _, err = f.readers[q].Write(out); err != nil {
 		f.l.WithError(err).Error("Failed to write to tun")
 	}
 	return true
@@ -537,10 +574,6 @@ func (f *Interface) handleRecvError(addr netip.AddrPort, h *header.H) {
 		return
 	}
 
-	if !hostinfo.RecvErrorExceeded() {
-		return
-	}
-
 	if hostinfo.remote.IsValid() && hostinfo.remote != addr {
 		f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
 		return
@@ -3,6 +3,7 @@ package overlay
 import (
 	"io"
 	"net/netip"
+	"sync"
 
 	"github.com/slackhq/nebula/routing"
 )
@@ -15,3 +16,84 @@ type Device interface {
 	RoutesFor(netip.Addr) routing.Gateways
 	NewMultiQueueReader() (io.ReadWriteCloser, error)
 }
+
+// Packet represents a single packet buffer with optional headroom to carry
+// metadata (for example virtio-net headers).
+type Packet struct {
+	Buf     []byte
+	Offset  int
+	Len     int
+	release func()
+}
+
+func (p *Packet) Payload() []byte {
+	return p.Buf[p.Offset : p.Offset+p.Len]
+}
+
+func (p *Packet) Reset() {
+	p.Len = 0
+	p.Offset = 0
+	p.release = nil
+}
+
+func (p *Packet) Release() {
+	if p.release != nil {
+		p.release()
+		p.release = nil
+	}
+}
+
+func (p *Packet) Capacity() int {
+	return len(p.Buf) - p.Offset
+}
+
+// PacketPool manages reusable buffers with headroom.
+type PacketPool struct {
+	headroom int
+	blksz    int
+	pool     sync.Pool
+}
+
+func NewPacketPool(headroom, payload int) *PacketPool {
+	p := &PacketPool{headroom: headroom, blksz: headroom + payload}
+	p.pool.New = func() any {
+		buf := make([]byte, p.blksz)
+		return &Packet{Buf: buf, Offset: headroom}
+	}
+	return p
+}
+
+func (p *PacketPool) Get() *Packet {
+	pkt := p.pool.Get().(*Packet)
+	pkt.Offset = p.headroom
+	pkt.Len = 0
+	pkt.release = func() { p.put(pkt) }
+	return pkt
+}
+
+func (p *PacketPool) put(pkt *Packet) {
+	pkt.Reset()
+	p.pool.Put(pkt)
+}
+
+// BatchReader allows reading multiple packets into a shared pool with
+// preallocated headroom (e.g. virtio-net headers).
+type BatchReader interface {
+	ReadIntoBatch(pool *PacketPool) ([]*Packet, error)
+}
+
+// BatchWriter writes a slice of packets that carry their own metadata.
+type BatchWriter interface {
+	WriteBatch(packets []*Packet) (int, error)
+}
+
+// BatchCapableDevice describes a device that can efficiently read and write
+// batches of packets with virtio headroom.
+type BatchCapableDevice interface {
+	Device
+	BatchReader
+	BatchWriter
+	BatchHeadroom() int
+	BatchPayloadCap() int
+	BatchSize() int
+}
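A short sketch of how the pool and packet types above fit together: acquire a packet, write the payload after the reserved headroom, set its length, and hand the buffer back with `Release` when done. The headroom/payload sizes and the copied sample payload are illustrative assumptions; the import path assumes these types live in the overlay package as shown in this branch.

```go
package main

import (
	"fmt"

	"github.com/slackhq/nebula/overlay"
)

func main() {
	// 16 bytes of headroom (e.g. room for a virtio-net header) plus payload space.
	pool := overlay.NewPacketPool(16, 1500)

	pkt := pool.Get()
	payload := []byte("example payload")
	n := copy(pkt.Buf[pkt.Offset:], payload)
	pkt.Len = n

	// Capacity excludes the headroom; Payload returns only the written bytes.
	fmt.Println(pkt.Capacity(), len(pkt.Payload()))

	// Return the packet to the pool; the buffer may be reused afterwards.
	pkt.Release()
}
```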
@@ -1,6 +1,8 @@
 package overlay
 
 import (
+	"fmt"
+	"net"
 	"net/netip"
 
 	"github.com/sirupsen/logrus"
@@ -70,3 +72,51 @@ func findRemovedRoutes(newRoutes, oldRoutes []Route) []Route {
 
 	return removed
 }
+
+func prefixToMask(prefix netip.Prefix) netip.Addr {
+	pLen := 128
+	if prefix.Addr().Is4() {
+		pLen = 32
+	}
+
+	addr, _ := netip.AddrFromSlice(net.CIDRMask(prefix.Bits(), pLen))
+	return addr
+}
+
+func flipBytes(b []byte) []byte {
+	for i := 0; i < len(b); i++ {
+		b[i] ^= 0xFF
+	}
+	return b
+}
+
+func orBytes(a []byte, b []byte) []byte {
+	ret := make([]byte, len(a))
+	for i := 0; i < len(a); i++ {
+		ret[i] = a[i] | b[i]
+	}
+	return ret
+}
+
+func getBroadcast(cidr netip.Prefix) netip.Addr {
+	broadcast, _ := netip.AddrFromSlice(
+		orBytes(
+			cidr.Addr().AsSlice(),
+			flipBytes(prefixToMask(cidr).AsSlice()),
+		),
+	)
+	return broadcast
+}
+
+func selectGateway(dest netip.Prefix, gateways []netip.Prefix) (netip.Prefix, error) {
+	for _, gateway := range gateways {
+		if dest.Addr().Is4() && gateway.Addr().Is4() {
+			return gateway, nil
+		}
+
+		if dest.Addr().Is6() && gateway.Addr().Is6() {
+			return gateway, nil
+		}
+	}
+
+	return netip.Prefix{}, fmt.Errorf("no gateway found for %v in the list of vpn networks", dest)
+}
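As a sanity check on the helpers above: the broadcast address is simply the network address OR-ed with the inverted netmask. The standalone snippet below recomputes the same steps inline (rather than calling the unexported helpers) for 192.168.1.0/24 and prints 192.168.1.255.

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	cidr := netip.MustParsePrefix("192.168.1.0/24")

	// Same steps as prefixToMask + flipBytes + orBytes above.
	mask := net.CIDRMask(cidr.Bits(), 32) // 255.255.255.0
	addr := cidr.Addr().AsSlice()

	out := make([]byte, len(addr))
	for i := range addr {
		out[i] = addr[i] | ^mask[i]
	}

	broadcast, _ := netip.AddrFromSlice(out)
	fmt.Println(broadcast) // 192.168.1.255
}
```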
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"net"
 	"net/netip"
 	"os"
 	"sync/atomic"
@@ -295,7 +294,6 @@ func (t *tun) activate6(network netip.Prefix) error {
 			Vltime: 0xffffffff,
 			Pltime: 0xffffffff,
 		},
-		//TODO: CERT-V2 should we disable DAD (duplicate address detection) and mark this as a secured address?
 		Flags: _IN6_IFF_NODAD,
 	}
 
@@ -554,13 +552,3 @@ func (t *tun) Name() string {
 func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
 	return nil, fmt.Errorf("TODO: multiqueue not implemented for darwin")
 }
-
-func prefixToMask(prefix netip.Prefix) netip.Addr {
-	pLen := 128
-	if prefix.Addr().Is4() {
-		pLen = 32
-	}
-
-	addr, _ := netip.AddrFromSlice(net.CIDRMask(prefix.Bits(), pLen))
-	return addr
-}
@@ -10,11 +10,9 @@ import (
 	"io"
 	"io/fs"
 	"net/netip"
-	"os"
-	"os/exec"
-	"strconv"
 	"sync/atomic"
 	"syscall"
+	"time"
 	"unsafe"
 
 	"github.com/gaissmai/bart"
@@ -22,12 +20,18 @@ import (
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/routing"
 	"github.com/slackhq/nebula/util"
+	netroute "golang.org/x/net/route"
+	"golang.org/x/sys/unix"
 )
 
 const (
 	// FIODGNAME is defined in sys/sys/filio.h on FreeBSD
 	// For 32-bit systems, use FIODGNAME_32 (not defined in this file: 0x80086678)
 	FIODGNAME = 0x80106678
+	TUNSIFMODE = 0x8004745e
+	TUNSIFHEAD = 0x80047460
+	OSIOCAIFADDR_IN6 = 0x8088691b
+	IN6_IFF_NODAD = 0x0020
 )
 
 type fiodgnameArg struct {
@@ -37,43 +41,159 @@ type fiodgnameArg struct {
 }
 
 type ifreqRename struct {
-	Name [16]byte
+	Name [unix.IFNAMSIZ]byte
 	Data uintptr
 }
 
 type ifreqDestroy struct {
-	Name [16]byte
+	Name [unix.IFNAMSIZ]byte
 	pad  [16]byte
 }
 
+type ifReq struct {
+	Name  [unix.IFNAMSIZ]byte
+	Flags uint16
+}
+
+type ifreqMTU struct {
+	Name [unix.IFNAMSIZ]byte
+	MTU  int32
+}
+
+type addrLifetime struct {
+	Expire    uint64
+	Preferred uint64
+	Vltime    uint32
+	Pltime    uint32
+}
+
+type ifreqAlias4 struct {
+	Name     [unix.IFNAMSIZ]byte
+	Addr     unix.RawSockaddrInet4
+	DstAddr  unix.RawSockaddrInet4
+	MaskAddr unix.RawSockaddrInet4
+	VHid     uint32
+}
+
+type ifreqAlias6 struct {
+	Name       [unix.IFNAMSIZ]byte
+	Addr       unix.RawSockaddrInet6
+	DstAddr    unix.RawSockaddrInet6
+	PrefixMask unix.RawSockaddrInet6
+	Flags      uint32
+	Lifetime   addrLifetime
+	VHid       uint32
+}
+
 type tun struct {
 	Device      string
 	vpnNetworks []netip.Prefix
 	MTU         int
 	Routes      atomic.Pointer[[]Route]
 	routeTree   atomic.Pointer[bart.Table[routing.Gateways]]
+	linkAddr    *netroute.LinkAddr
 	l           *logrus.Logger
-
-	io.ReadWriteCloser
+	devFd       int
 }
 
+func (t *tun) Read(to []byte) (int, error) {
+	// use readv() to read from the tunnel device, to eliminate the need for copying the buffer
+	if t.devFd < 0 {
+		return -1, syscall.EINVAL
+	}
+
+	// first 4 bytes is protocol family, in network byte order
+	head := make([]byte, 4)
+
+	iovecs := []syscall.Iovec{
+		{&head[0], 4},
+		{&to[0], uint64(len(to))},
+	}
+
+	n, _, errno := syscall.Syscall(syscall.SYS_READV, uintptr(t.devFd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
+
+	var err error
+	if errno != 0 {
+		err = syscall.Errno(errno)
+	} else {
+		err = nil
+	}
+
+	// fix bytes read number to exclude header
+	bytesRead := int(n)
+	if bytesRead < 0 {
+		return bytesRead, err
+	} else if bytesRead < 4 {
+		return 0, err
+	} else {
+		return bytesRead - 4, err
+	}
+}
+
+// Write is only valid for single threaded use
+func (t *tun) Write(from []byte) (int, error) {
+	// use writev() to write to the tunnel device, to eliminate the need for copying the buffer
+	if t.devFd < 0 {
+		return -1, syscall.EINVAL
+	}
+
+	if len(from) <= 1 {
+		return 0, syscall.EIO
+	}
+	ipVer := from[0] >> 4
+	var head []byte
+	// first 4 bytes is protocol family, in network byte order
+	if ipVer == 4 {
+		head = []byte{0, 0, 0, syscall.AF_INET}
+	} else if ipVer == 6 {
+		head = []byte{0, 0, 0, syscall.AF_INET6}
+	} else {
+		return 0, fmt.Errorf("unable to determine IP version from packet")
+	}
+	iovecs := []syscall.Iovec{
+		{&head[0], 4},
+		{&from[0], uint64(len(from))},
+	}
+
+	n, _, errno := syscall.Syscall(syscall.SYS_WRITEV, uintptr(t.devFd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
+
+	var err error
+	if errno != 0 {
+		err = syscall.Errno(errno)
+	} else {
+		err = nil
+	}
+
+	return int(n) - 4, err
+}
+
 func (t *tun) Close() error {
-	if t.ReadWriteCloser != nil {
-		if err := t.ReadWriteCloser.Close(); err != nil {
-			return err
-		}
-
-		s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
-		if err != nil {
-			return err
-		}
-		defer syscall.Close(s)
-
-		ifreq := ifreqDestroy{Name: t.deviceBytes()}
-
-		// Destroy the interface
-		err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
-		return err
+	if t.devFd >= 0 {
+		err := syscall.Close(t.devFd)
+		if err != nil {
+			t.l.WithError(err).Error("Error closing device")
+		}
+		t.devFd = -1
+
+		c := make(chan struct{})
+		go func() {
+			// destroying the interface can block if a read() is still pending. Do this asynchronously.
+			defer close(c)
+			s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
+			if err == nil {
+				defer syscall.Close(s)
+				ifreq := ifreqDestroy{Name: t.deviceBytes()}
+				err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
+			}
+			if err != nil {
+				t.l.WithError(err).Error("Error destroying tunnel")
+			}
+		}()
+
+		// wait up to 1 second so we start blocking at the ioctl
+		select {
+		case <-c:
+		case <-time.After(1 * time.Second):
+		}
 	}
 
 	return nil
@@ -85,32 +205,37 @@ func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun,
 
 func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (*tun, error) {
 	// Try to open existing tun device
-	var file *os.File
+	var fd int
 	var err error
 	deviceName := c.GetString("tun.dev", "")
 	if deviceName != "" {
-		file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
+		fd, err = syscall.Open("/dev/"+deviceName, syscall.O_RDWR, 0)
 	}
 	if errors.Is(err, fs.ErrNotExist) || deviceName == "" {
 		// If the device doesn't already exist, request a new one and rename it
-		file, err = os.OpenFile("/dev/tun", os.O_RDWR, 0)
+		fd, err = syscall.Open("/dev/tun", syscall.O_RDWR, 0)
 	}
 	if err != nil {
 		return nil, err
 	}
 
-	rawConn, err := file.SyscallConn()
-	if err != nil {
-		return nil, fmt.Errorf("SyscallConn: %v", err)
+	// Read the name of the interface
+	var name [16]byte
+	arg := fiodgnameArg{length: 16, buf: unsafe.Pointer(&name)}
+	ctrlErr := ioctl(uintptr(fd), FIODGNAME, uintptr(unsafe.Pointer(&arg)))
+
+	if ctrlErr == nil {
+		// set broadcast mode and multicast
+		ifmode := uint32(unix.IFF_BROADCAST | unix.IFF_MULTICAST)
+		ctrlErr = ioctl(uintptr(fd), TUNSIFMODE, uintptr(unsafe.Pointer(&ifmode)))
+	}
+
+	if ctrlErr == nil {
+		// turn on link-layer mode, to support ipv6
+		ifhead := uint32(1)
+		ctrlErr = ioctl(uintptr(fd), TUNSIFHEAD, uintptr(unsafe.Pointer(&ifhead)))
 	}
 
-	var name [16]byte
-	var ctrlErr error
-	rawConn.Control(func(fd uintptr) {
-		// Read the name of the interface
-		arg := fiodgnameArg{length: 16, buf: unsafe.Pointer(&name)}
-		ctrlErr = ioctl(fd, FIODGNAME, uintptr(unsafe.Pointer(&arg)))
-	})
 	if ctrlErr != nil {
 		return nil, err
 	}
@@ -122,11 +247,7 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
 
 	// If the name doesn't match the desired interface name, rename it now
 	if ifName != deviceName {
-		s, err := syscall.Socket(
-			syscall.AF_INET,
-			syscall.SOCK_DGRAM,
-			syscall.IPPROTO_IP,
-		)
+		s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
 		if err != nil {
 			return nil, err
 		}
@@ -149,11 +270,11 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
 	}
 
 	t := &tun{
-		ReadWriteCloser: file,
 		Device:      deviceName,
 		vpnNetworks: vpnNetworks,
 		MTU:         c.GetInt("tun.mtu", DefaultMTU),
 		l:           l,
+		devFd:       fd,
 	}
 
 	err = t.reload(c, true)
@@ -172,38 +293,111 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
 }
 
 func (t *tun) addIp(cidr netip.Prefix) error {
-	var err error
-	// TODO use syscalls instead of exec.Command
-	cmd := exec.Command("/sbin/ifconfig", t.Device, cidr.String(), cidr.Addr().String())
-	t.l.Debug("command: ", cmd.String())
-	if err = cmd.Run(); err != nil {
-		return fmt.Errorf("failed to run 'ifconfig': %s", err)
-	}
-
-	cmd = exec.Command("/sbin/route", "-n", "add", "-net", cidr.String(), "-interface", t.Device)
-	t.l.Debug("command: ", cmd.String())
-	if err = cmd.Run(); err != nil {
-		return fmt.Errorf("failed to run 'route add': %s", err)
-	}
-
-	cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
-	t.l.Debug("command: ", cmd.String())
-	if err = cmd.Run(); err != nil {
-		return fmt.Errorf("failed to run 'ifconfig': %s", err)
-	}
-
-	// Unsafe path routes
-	return t.addRoutes(false)
+	if cidr.Addr().Is4() {
+		ifr := ifreqAlias4{
+			Name: t.deviceBytes(),
+			Addr: unix.RawSockaddrInet4{
+				Len:    unix.SizeofSockaddrInet4,
+				Family: unix.AF_INET,
+				Addr:   cidr.Addr().As4(),
+			},
+			DstAddr: unix.RawSockaddrInet4{
+				Len:    unix.SizeofSockaddrInet4,
+				Family: unix.AF_INET,
+				Addr:   getBroadcast(cidr).As4(),
+			},
+			MaskAddr: unix.RawSockaddrInet4{
+				Len:    unix.SizeofSockaddrInet4,
+				Family: unix.AF_INET,
+				Addr:   prefixToMask(cidr).As4(),
+			},
+			VHid: 0,
+		}
+		s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+		if err != nil {
+			return err
+		}
+		defer syscall.Close(s)
+		// Note: unix.SIOCAIFADDR corresponds to FreeBSD's OSIOCAIFADDR
+		if err := ioctl(uintptr(s), unix.SIOCAIFADDR, uintptr(unsafe.Pointer(&ifr))); err != nil {
+			return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
+		}
+		return nil
+	}
+
+	if cidr.Addr().Is6() {
+		ifr := ifreqAlias6{
+			Name: t.deviceBytes(),
+			Addr: unix.RawSockaddrInet6{
+				Len:    unix.SizeofSockaddrInet6,
+				Family: unix.AF_INET6,
+				Addr:   cidr.Addr().As16(),
+			},
+			PrefixMask: unix.RawSockaddrInet6{
+				Len:    unix.SizeofSockaddrInet6,
+				Family: unix.AF_INET6,
+				Addr:   prefixToMask(cidr).As16(),
+			},
+			Lifetime: addrLifetime{
+				Expire:    0,
+				Preferred: 0,
+				Vltime:    0xffffffff,
+				Pltime:    0xffffffff,
+			},
+			Flags: IN6_IFF_NODAD,
+		}
+		s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
+		if err != nil {
+			return err
+		}
+		defer syscall.Close(s)
+
+		if err := ioctl(uintptr(s), OSIOCAIFADDR_IN6, uintptr(unsafe.Pointer(&ifr))); err != nil {
+			return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
+		}
+		return nil
+	}
+
+	return fmt.Errorf("unknown address type %v", cidr)
 }
 
 func (t *tun) Activate() error {
+	// Setup our default MTU
+	err := t.setMTU()
+	if err != nil {
+		return err
+	}
+
+	linkAddr, err := getLinkAddr(t.Device)
+	if err != nil {
+		return err
+	}
+	if linkAddr == nil {
+		return fmt.Errorf("unable to discover link_addr for tun interface")
+	}
+	t.linkAddr = linkAddr
+
 	for i := range t.vpnNetworks {
 		err := t.addIp(t.vpnNetworks[i])
 		if err != nil {
 			return err
 		}
 	}
-	return nil
+
+	return t.addRoutes(false)
+}
+
+func (t *tun) setMTU() error {
+	// Set the MTU on the device
+	s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+	if err != nil {
+		return err
+	}
+	defer syscall.Close(s)
+
+	ifm := ifreqMTU{Name: t.deviceBytes(), MTU: int32(t.MTU)}
+	err = ioctl(uintptr(s), unix.SIOCSIFMTU, uintptr(unsafe.Pointer(&ifm)))
+	return err
 }
 
 func (t *tun) reload(c *config.C, initial bool) error {
@@ -268,15 +462,16 @@ func (t *tun) addRoutes(logErrors bool) error {
 			continue
 		}
 
-		cmd := exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device)
-		t.l.Debug("command: ", cmd.String())
-		if err := cmd.Run(); err != nil {
-			retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]any{"route": r}, err)
+		err := addRoute(r.Cidr, t.linkAddr)
+		if err != nil {
+			retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
 			if logErrors {
 				retErr.Log(t.l)
 			} else {
 				return retErr
 			}
+		} else {
+			t.l.WithField("route", r).Info("Added route")
 		}
 	}
 
@@ -289,9 +484,8 @@ func (t *tun) removeRoutes(routes []Route) error {
 			continue
 		}
 
-		cmd := exec.Command("/sbin/route", "-n", "delete", "-net", r.Cidr.String(), "-interface", t.Device)
-		t.l.Debug("command: ", cmd.String())
-		if err := cmd.Run(); err != nil {
+		err := delRoute(r.Cidr, t.linkAddr)
+		if err != nil {
 			t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
 		} else {
 			t.l.WithField("route", r).Info("Removed route")
@@ -306,3 +500,120 @@ func (t *tun) deviceBytes() (o [16]byte) {
 	}
 	return
 }
+
+func addRoute(prefix netip.Prefix, gateway netroute.Addr) error {
+	sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
+	if err != nil {
+		return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
+	}
+	defer unix.Close(sock)
+
+	route := &netroute.RouteMessage{
+		Version: unix.RTM_VERSION,
+		Type:    unix.RTM_ADD,
+		Flags:   unix.RTF_UP,
+		Seq:     1,
+	}
+
+	if prefix.Addr().Is4() {
+		route.Addrs = []netroute.Addr{
+			unix.RTAX_DST:     &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
+			unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
+			unix.RTAX_GATEWAY: gateway,
+		}
+	} else {
+		route.Addrs = []netroute.Addr{
+			unix.RTAX_DST:     &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
+			unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
+			unix.RTAX_GATEWAY: gateway,
+		}
+	}
+
+	data, err := route.Marshal()
+	if err != nil {
+		return fmt.Errorf("failed to create route.RouteMessage: %w", err)
+	}
+
+	_, err = unix.Write(sock, data[:])
+	if err != nil {
+		if errors.Is(err, unix.EEXIST) {
+			// Try to do a change
+			route.Type = unix.RTM_CHANGE
+			data, err = route.Marshal()
+			if err != nil {
+				return fmt.Errorf("failed to create route.RouteMessage for change: %w", err)
+			}
+			_, err = unix.Write(sock, data[:])
+			fmt.Println("DOING CHANGE")
+			return err
+		}
+		return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
+	}
+
+	return nil
+}
+
+func delRoute(prefix netip.Prefix, gateway netroute.Addr) error {
+	sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
+	if err != nil {
+		return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
+	}
+	defer unix.Close(sock)
+
+	route := netroute.RouteMessage{
+		Version: unix.RTM_VERSION,
+		Type:    unix.RTM_DELETE,
+		Seq:     1,
+	}
+
+	if prefix.Addr().Is4() {
+		route.Addrs = []netroute.Addr{
+			unix.RTAX_DST:     &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
+			unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
+			unix.RTAX_GATEWAY: gateway,
+		}
+	} else {
+		route.Addrs = []netroute.Addr{
+			unix.RTAX_DST:     &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
+			unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
+			unix.RTAX_GATEWAY: gateway,
+		}
+	}
+
+	data, err := route.Marshal()
+	if err != nil {
+		return fmt.Errorf("failed to create route.RouteMessage: %w", err)
+	}
+	_, err = unix.Write(sock, data[:])
+	if err != nil {
+		return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
+	}
+
+	return nil
+}
+
+// getLinkAddr Gets the link address for the interface of the given name
+func getLinkAddr(name string) (*netroute.LinkAddr, error) {
+	rib, err := netroute.FetchRIB(unix.AF_UNSPEC, unix.NET_RT_IFLIST, 0)
+	if err != nil {
+		return nil, err
+	}
+	msgs, err := netroute.ParseRIB(unix.NET_RT_IFLIST, rib)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, m := range msgs {
+		switch m := m.(type) {
+		case *netroute.InterfaceMessage:
+			if m.Name == name {
+				sa, ok := m.Addrs[unix.RTAX_IFP].(*netroute.LinkAddr)
+				if ok {
+					return sa, nil
+				}
+			}
+		}
+	}
+
+	return nil, nil
+}
@@ -9,6 +9,7 @@ import (
 	"net"
 	"net/netip"
 	"os"
+	"runtime"
 	"strings"
 	"sync/atomic"
 	"time"
@@ -19,6 +20,7 @@ import (
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/routing"
 	"github.com/slackhq/nebula/util"
+	wgtun "github.com/slackhq/nebula/wgstack/tun"
 	"github.com/vishvananda/netlink"
 	"golang.org/x/sys/unix"
 )
@@ -33,6 +35,7 @@ type tun struct {
 	TXQueueLen  int
 	deviceIndex int
 	ioctlFd     uintptr
+	wgDevice    wgtun.Device
 
 	Routes    atomic.Pointer[[]Route]
 	routeTree atomic.Pointer[bart.Table[routing.Gateways]]
@@ -68,7 +71,9 @@ type ifreqQLEN struct {
 func newTunFromFd(c *config.C, l *logrus.Logger, deviceFd int, vpnNetworks []netip.Prefix) (*tun, error) {
 	file := os.NewFile(uintptr(deviceFd), "/dev/net/tun")
 
-	t, err := newTunGeneric(c, l, file, vpnNetworks)
+	useWGDefault := runtime.GOOS == "linux"
+	useWG := c.GetBool("tun.use_wireguard_stack", c.GetBool("listen.use_wireguard_stack", useWGDefault))
+	t, err := newTunGeneric(c, l, file, vpnNetworks, useWG)
 	if err != nil {
 		return nil, err
 	}
@@ -113,7 +118,9 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, multiqueu
 	name := strings.Trim(string(req.Name[:]), "\x00")
 
 	file := os.NewFile(uintptr(fd), "/dev/net/tun")
-	t, err := newTunGeneric(c, l, file, vpnNetworks)
+	useWGDefault := runtime.GOOS == "linux"
+	useWG := c.GetBool("tun.use_wireguard_stack", c.GetBool("listen.use_wireguard_stack", useWGDefault))
+	t, err := newTunGeneric(c, l, file, vpnNetworks, useWG)
 	if err != nil {
 		return nil, err
 	}
@@ -123,16 +130,45 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, multiqueu
 	return t, nil
 }
 
-func newTunGeneric(c *config.C, l *logrus.Logger, file *os.File, vpnNetworks []netip.Prefix) (*tun, error) {
+func newTunGeneric(c *config.C, l *logrus.Logger, file *os.File, vpnNetworks []netip.Prefix, useWireguard bool) (*tun, error) {
+	var (
+		rw    io.ReadWriteCloser = file
+		fd                       = int(file.Fd())
+		wgDev wgtun.Device
+	)
+
+	if useWireguard {
+		dev, err := wgtun.CreateTUNFromFile(file, c.GetInt("tun.mtu", DefaultMTU))
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize wireguard tun device: %w", err)
+		}
+		wgDev = dev
+		rw = newWireguardTunIO(dev, c.GetInt("tun.mtu", DefaultMTU))
+		fd = int(dev.File().Fd())
+	}
+
 	t := &tun{
-		ReadWriteCloser:           file,
-		fd:                        int(file.Fd()),
+		ReadWriteCloser:           rw,
+		fd:                        fd,
 		vpnNetworks:               vpnNetworks,
 		TXQueueLen:                c.GetInt("tun.tx_queue", 500),
 		useSystemRoutes:           c.GetBool("tun.use_system_route_table", false),
 		useSystemRoutesBufferSize: c.GetInt("tun.use_system_route_table_buffer_size", 0),
 		l:                         l,
 	}
+	if wgDev != nil {
+		t.wgDevice = wgDev
+	}
+	if wgDev != nil {
+		// replace ioctl fd with device file descriptor to keep route management working
+		file = wgDev.File()
+		t.fd = int(file.Fd())
+		t.ioctlFd = file.Fd()
+	}
+
+	if t.ioctlFd == 0 {
+		t.ioctlFd = file.Fd()
+	}
 
 	err := t.reload(c, true)
 	if err != nil {
@@ -293,7 +329,6 @@ func (t *tun) addIPs(link netlink.Link) error {
 
 	//add all new addresses
 	for i := range newAddrs {
-		//TODO: CERT-V2 do we want to stack errors and try as many ops as possible?
 		//AddrReplace still adds new IPs, but if their properties change it will change them as well
 		if err := netlink.AddrReplace(link, newAddrs[i]); err != nil {
 			return err
@@ -361,6 +396,11 @@ func (t *tun) Activate() error {
 		t.l.WithError(err).Error("Failed to set tun tx queue length")
 	}
 
+	const modeNone = 1
+	if err = netlink.LinkSetIP6AddrGenMode(link, modeNone); err != nil {
+		t.l.WithError(err).Warn("Failed to disable link local address generation")
+	}
+
 	if err = t.addIPs(link); err != nil {
 		return err
 	}
@@ -638,6 +678,11 @@ func (t *tun) updateRoutes(r netlink.RouteUpdate) {
 		return
 	}
 
+	if r.Dst == nil {
+		t.l.WithField("route", r).Debug("Ignoring route update, no destination address")
+		return
+	}
+
 	dstAddr, ok := netip.AddrFromSlice(r.Dst.IP)
 	if !ok {
 		t.l.WithField("route", r).Debug("Ignoring route update, invalid destination address")
@@ -669,6 +714,14 @@ func (t *tun) Close() error {
 		_ = t.ReadWriteCloser.Close()
 	}
 
+	if t.wgDevice != nil {
+		_ = t.wgDevice.Close()
+		if t.ioctlFd > 0 {
+			// underlying fd already closed by the device
+			t.ioctlFd = 0
+		}
+	}
+
 	if t.ioctlFd > 0 {
 		_ = os.NewFile(t.ioctlFd, "ioctlFd").Close()
 	}

overlay/tun_linux_batch.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+//go:build linux && !android && !e2e_testing
+
+package overlay
+
+import "fmt"
+
+func (t *tun) batchIO() (*wireguardTunIO, bool) {
+	io, ok := t.ReadWriteCloser.(*wireguardTunIO)
+	return io, ok
+}
+
+func (t *tun) ReadIntoBatch(pool *PacketPool) ([]*Packet, error) {
+	io, ok := t.batchIO()
+	if !ok {
+		return nil, fmt.Errorf("wireguard batch I/O not enabled")
+	}
+	return io.ReadIntoBatch(pool)
+}
+
+func (t *tun) WriteBatch(packets []*Packet) (int, error) {
+	io, ok := t.batchIO()
+	if ok {
+		return io.WriteBatch(packets)
+	}
+	for _, pkt := range packets {
+		if pkt == nil {
+			continue
+		}
+		if _, err := t.Write(pkt.Payload()[:pkt.Len]); err != nil {
+			return 0, err
+		}
+		pkt.Release()
+	}
+	return len(packets), nil
+}
+
+func (t *tun) BatchHeadroom() int {
+	if io, ok := t.batchIO(); ok {
+		return io.BatchHeadroom()
+	}
+	return 0
+}
+
+func (t *tun) BatchPayloadCap() int {
+	if io, ok := t.batchIO(); ok {
+		return io.BatchPayloadCap()
+	}
+	return 0
+}
+
+func (t *tun) BatchSize() int {
+	if io, ok := t.batchIO(); ok {
+		return io.BatchSize()
+	}
+	return 1
+}
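With the batch methods above and the interfaces added to the overlay device file, a caller can probe whether a Device supports batched I/O and size its packet pool from the device's own headroom and payload limits. The sketch below is a hedged illustration only; the read loop and its error handling are hypothetical and not the datapath code Nebula actually uses.

```go
package main

import (
	"log"

	"github.com/slackhq/nebula/overlay"
)

// readLoop drains a device in batches when it supports them, assuming the
// overlay.BatchCapableDevice interface introduced on this branch.
func readLoop(dev overlay.Device, handle func(*overlay.Packet)) {
	bd, ok := dev.(overlay.BatchCapableDevice)
	if !ok {
		log.Println("device does not support batched reads")
		return
	}

	pool := overlay.NewPacketPool(bd.BatchHeadroom(), bd.BatchPayloadCap())
	for {
		pkts, err := bd.ReadIntoBatch(pool)
		if err != nil {
			log.Println("batch read failed:", err)
			return
		}
		for _, p := range pkts {
			handle(p) // the handler is responsible for calling p.Release()
		}
	}
}

func main() {
	// Wiring up a real Device is platform specific; this only shows the shape.
	_ = readLoop
}
```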
@@ -4,13 +4,12 @@
 package overlay
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"net/netip"
 	"os"
-	"os/exec"
 	"regexp"
-	"strconv"
 	"sync/atomic"
 	"syscall"
 	"unsafe"
@@ -20,11 +19,42 @@ import (
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/routing"
 	"github.com/slackhq/nebula/util"
+	netroute "golang.org/x/net/route"
+	"golang.org/x/sys/unix"
 )
 
-type ifreqDestroy struct {
-	Name [16]byte
-	pad  [16]byte
+const (
+	SIOCAIFADDR_IN6 = 0x8080696b
+	TUNSIFHEAD      = 0x80047442
+	TUNSIFMODE      = 0x80047458
+)
+
+type ifreqAlias4 struct {
+	Name     [unix.IFNAMSIZ]byte
+	Addr     unix.RawSockaddrInet4
+	DstAddr  unix.RawSockaddrInet4
+	MaskAddr unix.RawSockaddrInet4
+}
+
+type ifreqAlias6 struct {
+	Name       [unix.IFNAMSIZ]byte
+	Addr       unix.RawSockaddrInet6
+	DstAddr    unix.RawSockaddrInet6
+	PrefixMask unix.RawSockaddrInet6
+	Flags      uint32
+	Lifetime   addrLifetime
+}
+
+type ifreq struct {
+	Name [unix.IFNAMSIZ]byte
+	data int
+}
+
+type addrLifetime struct {
+	Expire    uint64
+	Preferred uint64
+	Vltime    uint32
+	Pltime    uint32
 }
 
 type tun struct {
@@ -34,40 +64,18 @@ type tun struct {
 	Routes      atomic.Pointer[[]Route]
 	routeTree   atomic.Pointer[bart.Table[routing.Gateways]]
 	l           *logrus.Logger
-
-	io.ReadWriteCloser
+	f           *os.File
+	fd          int
 }
 
-func (t *tun) Close() error {
-	if t.ReadWriteCloser != nil {
-		if err := t.ReadWriteCloser.Close(); err != nil {
-			return err
-		}
-
-		s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
-		if err != nil {
-			return err
-		}
-		defer syscall.Close(s)
-
-		ifreq := ifreqDestroy{Name: t.deviceBytes()}
-
-		err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
-
-		return err
-	}
-	return nil
-}
+var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
 
 func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun, error) {
 	return nil, fmt.Errorf("newTunFromFd not supported in NetBSD")
 }
 
-var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
-
 func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (*tun, error) {
 	// Try to open tun device
-	var file *os.File
 	var err error
 	deviceName := c.GetString("tun.dev", "")
 	if deviceName == "" {
@@ -77,17 +85,23 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
 		return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
 	}
 
-	file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
+	fd, err := unix.Open("/dev/"+deviceName, os.O_RDWR, 0)
 	if err != nil {
 		return nil, err
 	}
 
+	err = unix.SetNonblock(fd, true)
+	if err != nil {
+		l.WithError(err).Warn("Failed to set the tun device as nonblocking")
+	}
+
 	t := &tun{
-		ReadWriteCloser: file,
-		Device:          deviceName,
-		vpnNetworks:     vpnNetworks,
-		MTU:             c.GetInt("tun.mtu", DefaultMTU),
-		l:               l,
+		f:           os.NewFile(uintptr(fd), ""),
+		fd:          fd,
+		Device:      deviceName,
+		vpnNetworks: vpnNetworks,
+		MTU:         c.GetInt("tun.mtu", DefaultMTU),
+		l:           l,
 	}
 
 	err = t.reload(c, true)
@@ -105,40 +119,225 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
 	return t, nil
 }
 
+func (t *tun) Close() error {
+	if t.f != nil {
+		if err := t.f.Close(); err != nil {
+			return fmt.Errorf("error closing tun file: %w", err)
+		}
+
+		// t.f.Close should have handled it for us but let's be extra sure
+		_ = unix.Close(t.fd)
+
+		s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
+		if err != nil {
+			return err
+		}
+		defer syscall.Close(s)
+
+		ifr := ifreq{Name: t.deviceBytes()}
+		err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifr)))
+		return err
+	}
+	return nil
+}
+
+func (t *tun) Read(to []byte) (int, error) {
+	rc, err := t.f.SyscallConn()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get syscall conn for tun: %w", err)
+	}
+
+	var errno syscall.Errno
+	var n uintptr
+	err = rc.Read(func(fd uintptr) bool {
+		// first 4 bytes is protocol family, in network byte order
+		head := [4]byte{}
+		iovecs := []syscall.Iovec{
+			{&head[0], 4},
+			{&to[0], uint64(len(to))},
+		}
+
+		n, _, errno = syscall.Syscall(syscall.SYS_READV, fd, uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
+		if errno.Temporary() {
+			// We got an EAGAIN, EINTR, or EWOULDBLOCK, go again
+			return false
+		}
+		return true
+	})
+	if err != nil {
+		if err == syscall.EBADF || err.Error() == "use of closed file" {
+			// Go doesn't export poll.ErrFileClosing but happily reports it to us so here we are
+			// https://github.com/golang/go/blob/master/src/internal/poll/fd_poll_runtime.go#L121
+			return 0, os.ErrClosed
+		}
+		return 0, fmt.Errorf("failed to make read call for tun: %w", err)
+	}
+
+	if errno != 0 {
+		return 0, fmt.Errorf("failed to make inner read call for tun: %w", errno)
+	}
+
+	// fix bytes read number to exclude header
+	bytesRead := int(n)
+	if bytesRead < 0 {
+		return bytesRead, nil
+	} else if bytesRead < 4 {
+		return 0, nil
+	} else {
+		return bytesRead - 4, nil
+	}
+}
+
+// Write is only valid for single threaded use
+func (t *tun) Write(from []byte) (int, error) {
+	if len(from) <= 1 {
+		return 0, syscall.EIO
+	}
+
+	ipVer := from[0] >> 4
+	var head [4]byte
+	// first 4 bytes is protocol family, in network byte order
+	if ipVer == 4 {
+		head[3] = syscall.AF_INET
+	} else if ipVer == 6 {
+		head[3] = syscall.AF_INET6
+	} else {
+		return 0, fmt.Errorf("unable to determine IP version from packet")
+	}
+
+	rc, err := t.f.SyscallConn()
+	if err != nil {
+		return 0, err
+	}
+
+	var errno syscall.Errno
+	var n uintptr
+	err = rc.Write(func(fd uintptr) bool {
+		iovecs := []syscall.Iovec{
+			{&head[0], 4},
+			{&from[0], uint64(len(from))},
+		}
+
+		n, _, errno = syscall.Syscall(syscall.SYS_WRITEV, fd, uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
+		// According to NetBSD documentation for TUN, writes will only return errors in which
+		// this packet will never be delivered so just go on living life.
+		return true
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	if errno != 0 {
+		return 0, errno
+	}
+
+	return int(n) - 4, err
+}
+
 func (t *tun) addIp(cidr netip.Prefix) error {
-	var err error
+	if cidr.Addr().Is4() {
+		var req ifreqAlias4
+		req.Name = t.deviceBytes()
+		req.Addr = unix.RawSockaddrInet4{
+			Len:    unix.SizeofSockaddrInet4,
+			Family: unix.AF_INET,
|
||||||
|
Addr: cidr.Addr().As4(),
|
||||||
|
}
|
||||||
|
req.DstAddr = unix.RawSockaddrInet4{
|
||||||
|
Len: unix.SizeofSockaddrInet4,
|
||||||
|
Family: unix.AF_INET,
|
||||||
|
Addr: cidr.Addr().As4(),
|
||||||
|
}
|
||||||
|
req.MaskAddr = unix.RawSockaddrInet4{
|
||||||
|
Len: unix.SizeofSockaddrInet4,
|
||||||
|
Family: unix.AF_INET,
|
||||||
|
Addr: prefixToMask(cidr).As4(),
|
||||||
|
}
|
||||||
|
|
||||||
// TODO use syscalls instead of exec.Command
|
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||||
cmd := exec.Command("/sbin/ifconfig", t.Device, cidr.String(), cidr.Addr().String())
|
if err != nil {
|
||||||
t.l.Debug("command: ", cmd.String())
|
return err
|
||||||
if err = cmd.Run(); err != nil {
|
}
|
||||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
defer syscall.Close(s)
|
||||||
|
|
||||||
|
if err := ioctl(uintptr(s), unix.SIOCAIFADDR, uintptr(unsafe.Pointer(&req))); err != nil {
|
||||||
|
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd = exec.Command("/sbin/route", "-n", "add", "-net", cidr.String(), cidr.Addr().String())
|
if cidr.Addr().Is6() {
|
||||||
t.l.Debug("command: ", cmd.String())
|
var req ifreqAlias6
|
||||||
if err = cmd.Run(); err != nil {
|
req.Name = t.deviceBytes()
|
||||||
return fmt.Errorf("failed to run 'route add': %s", err)
|
req.Addr = unix.RawSockaddrInet6{
|
||||||
|
Len: unix.SizeofSockaddrInet6,
|
||||||
|
Family: unix.AF_INET6,
|
||||||
|
Addr: cidr.Addr().As16(),
|
||||||
|
}
|
||||||
|
req.PrefixMask = unix.RawSockaddrInet6{
|
||||||
|
Len: unix.SizeofSockaddrInet6,
|
||||||
|
Family: unix.AF_INET6,
|
||||||
|
Addr: prefixToMask(cidr).As16(),
|
||||||
|
}
|
||||||
|
req.Lifetime = addrLifetime{
|
||||||
|
Vltime: 0xffffffff,
|
||||||
|
Pltime: 0xffffffff,
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer syscall.Close(s)
|
||||||
|
|
||||||
|
if err := ioctl(uintptr(s), SIOCAIFADDR_IN6, uintptr(unsafe.Pointer(&req))); err != nil {
|
||||||
|
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
|
return fmt.Errorf("unknown address type %v", cidr)
|
||||||
t.l.Debug("command: ", cmd.String())
|
|
||||||
if err = cmd.Run(); err != nil {
|
|
||||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unsafe path routes
|
|
||||||
return t.addRoutes(false)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *tun) Activate() error {
|
func (t *tun) Activate() error {
|
||||||
|
mode := int32(unix.IFF_BROADCAST)
|
||||||
|
err := ioctl(uintptr(t.fd), TUNSIFMODE, uintptr(unsafe.Pointer(&mode)))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to set tun device mode: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
v := 1
|
||||||
|
err = ioctl(uintptr(t.fd), TUNSIFHEAD, uintptr(unsafe.Pointer(&v)))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to set tun device head: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = t.doIoctlByName(unix.SIOCSIFMTU, uint32(t.MTU))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to set tun mtu: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
for i := range t.vpnNetworks {
|
for i := range t.vpnNetworks {
|
||||||
err := t.addIp(t.vpnNetworks[i])
|
err = t.addIp(t.vpnNetworks[i])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
return t.addRoutes(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tun) doIoctlByName(ctl uintptr, value uint32) error {
|
||||||
|
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer syscall.Close(s)
|
||||||
|
|
||||||
|
ir := ifreq{Name: t.deviceBytes(), data: int(value)}
|
||||||
|
err = ioctl(uintptr(s), ctl, uintptr(unsafe.Pointer(&ir)))
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *tun) reload(c *config.C, initial bool) error {
|
func (t *tun) reload(c *config.C, initial bool) error {
|
||||||
@@ -197,21 +396,23 @@ func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
|||||||
|
|
||||||
func (t *tun) addRoutes(logErrors bool) error {
|
func (t *tun) addRoutes(logErrors bool) error {
|
||||||
routes := *t.Routes.Load()
|
routes := *t.Routes.Load()
|
||||||
|
|
||||||
for _, r := range routes {
|
for _, r := range routes {
|
||||||
if len(r.Via) == 0 || !r.Install {
|
if len(r.Via) == 0 || !r.Install {
|
||||||
// We don't allow route MTUs so only install routes with a via
|
// We don't allow route MTUs so only install routes with a via
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
|
err := addRoute(r.Cidr, t.vpnNetworks)
|
||||||
t.l.Debug("command: ", cmd.String())
|
if err != nil {
|
||||||
if err := cmd.Run(); err != nil {
|
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||||
retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]any{"route": r}, err)
|
|
||||||
if logErrors {
|
if logErrors {
|
||||||
retErr.Log(t.l)
|
retErr.Log(t.l)
|
||||||
} else {
|
} else {
|
||||||
return retErr
|
return retErr
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
t.l.WithField("route", r).Info("Added route")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,10 +425,8 @@ func (t *tun) removeRoutes(routes []Route) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: CERT-V2 is this right?
|
err := delRoute(r.Cidr, t.vpnNetworks)
|
||||||
cmd := exec.Command("/sbin/route", "-n", "delete", "-net", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
|
if err != nil {
|
||||||
t.l.Debug("command: ", cmd.String())
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
|
t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
|
||||||
} else {
|
} else {
|
||||||
t.l.WithField("route", r).Info("Removed route")
|
t.l.WithField("route", r).Info("Removed route")
|
||||||
@@ -242,3 +441,109 @@ func (t *tun) deviceBytes() (o [16]byte) {
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func addRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
|
||||||
|
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||||
|
}
|
||||||
|
defer unix.Close(sock)
|
||||||
|
|
||||||
|
route := &netroute.RouteMessage{
|
||||||
|
Version: unix.RTM_VERSION,
|
||||||
|
Type: unix.RTM_ADD,
|
||||||
|
Flags: unix.RTF_UP | unix.RTF_GATEWAY,
|
||||||
|
Seq: 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
if prefix.Addr().Is4() {
|
||||||
|
gw, err := selectGateway(prefix, gateways)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
route.Addrs = []netroute.Addr{
|
||||||
|
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||||
|
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||||
|
unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
gw, err := selectGateway(prefix, gateways)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
route.Addrs = []netroute.Addr{
|
||||||
|
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||||
|
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||||
|
unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := route.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = unix.Write(sock, data[:])
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, unix.EEXIST) {
|
||||||
|
// Try to do a change
|
||||||
|
route.Type = unix.RTM_CHANGE
|
||||||
|
data, err = route.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create route.RouteMessage for change: %w", err)
|
||||||
|
}
|
||||||
|
_, err = unix.Write(sock, data[:])
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func delRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
|
||||||
|
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||||
|
}
|
||||||
|
defer unix.Close(sock)
|
||||||
|
|
||||||
|
route := netroute.RouteMessage{
|
||||||
|
Version: unix.RTM_VERSION,
|
||||||
|
Type: unix.RTM_DELETE,
|
||||||
|
Seq: 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
if prefix.Addr().Is4() {
|
||||||
|
gw, err := selectGateway(prefix, gateways)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
route.Addrs = []netroute.Addr{
|
||||||
|
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||||
|
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||||
|
unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
gw, err := selectGateway(prefix, gateways)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
route.Addrs = []netroute.Addr{
|
||||||
|
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||||
|
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||||
|
unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := route.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||||
|
}
|
||||||
|
_, err = unix.Write(sock, data[:])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
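The new Read/Write paths above carry the packet behind a 4-byte protocol-family word (the TUNSIFHEAD-style framing), which is why the iovecs start with `head` and the returned lengths subtract 4. Below is a minimal, self-contained sketch of that framing, using only the standard library; the truncated IPv4 bytes in `main` are purely illustrative, not real traffic.

```go
// Sketch of the 4-byte address-family framing used by the BSD tun read/write
// paths above. The value is in network byte order; only the last byte matters
// for AF_INET/AF_INET6, which is why the diff sets head[3].
package main

import (
	"fmt"
	"syscall"
)

func frame(packet []byte) ([]byte, error) {
	if len(packet) == 0 {
		return nil, fmt.Errorf("empty packet")
	}
	var head [4]byte
	switch packet[0] >> 4 { // IP version lives in the high nibble of byte 0
	case 4:
		head[3] = syscall.AF_INET
	case 6:
		head[3] = syscall.AF_INET6
	default:
		return nil, fmt.Errorf("unable to determine IP version from packet")
	}
	return append(head[:], packet...), nil
}

func main() {
	// Hypothetical, truncated IPv4 header bytes, for illustration only.
	framed, _ := frame([]byte{0x45, 0x00, 0x00, 0x14})
	fmt.Printf("% x\n", framed)
}
```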
@@ -4,23 +4,50 @@
 package overlay

 import (
+  "errors"
   "fmt"
   "io"
   "net/netip"
   "os"
-  "os/exec"
   "regexp"
-  "strconv"
   "sync/atomic"
   "syscall"
+  "unsafe"

   "github.com/gaissmai/bart"
   "github.com/sirupsen/logrus"
   "github.com/slackhq/nebula/config"
   "github.com/slackhq/nebula/routing"
   "github.com/slackhq/nebula/util"
+  netroute "golang.org/x/net/route"
+  "golang.org/x/sys/unix"
 )

+const (
+  SIOCAIFADDR_IN6 = 0x8080691a
+)
+
+type ifreqAlias4 struct {
+  Name     [unix.IFNAMSIZ]byte
+  Addr     unix.RawSockaddrInet4
+  DstAddr  unix.RawSockaddrInet4
+  MaskAddr unix.RawSockaddrInet4
+}
+
+type ifreqAlias6 struct {
+  Name       [unix.IFNAMSIZ]byte
+  Addr       unix.RawSockaddrInet6
+  DstAddr    unix.RawSockaddrInet6
+  PrefixMask unix.RawSockaddrInet6
+  Flags      uint32
+  Lifetime   [2]uint32
+}
+
+type ifreq struct {
+  Name [unix.IFNAMSIZ]byte
+  data int
+}
+
 type tun struct {
   Device      string
   vpnNetworks []netip.Prefix
@@ -28,48 +55,46 @@ type tun struct {
   Routes    atomic.Pointer[[]Route]
   routeTree atomic.Pointer[bart.Table[routing.Gateways]]
   l         *logrus.Logger
-  io.ReadWriteCloser
+  f  *os.File
+  fd int

   // cache out buffer since we need to prepend 4 bytes for tun metadata
   out []byte
 }

-func (t *tun) Close() error {
-  if t.ReadWriteCloser != nil {
-    return t.ReadWriteCloser.Close()
-  }
-
-  return nil
-}
-
-func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun, error) {
-  return nil, fmt.Errorf("newTunFromFd not supported in OpenBSD")
-}
-
 var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)

+func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun, error) {
+  return nil, fmt.Errorf("newTunFromFd not supported in openbsd")
+}
+
 func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (*tun, error) {
+  // Try to open tun device
+  var err error
   deviceName := c.GetString("tun.dev", "")
   if deviceName == "" {
-    return nil, fmt.Errorf("a device name in the format of tunN must be specified")
+    return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
   }

   if !deviceNameRE.MatchString(deviceName) {
-    return nil, fmt.Errorf("a device name in the format of tunN must be specified")
+    return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
   }

-  file, err := os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
+  fd, err := unix.Open("/dev/"+deviceName, os.O_RDWR, 0)
   if err != nil {
     return nil, err
   }

+  err = unix.SetNonblock(fd, true)
+  if err != nil {
+    l.WithError(err).Warn("Failed to set the tun device as nonblocking")
+  }

   t := &tun{
-    ReadWriteCloser: file,
-    Device:          deviceName,
-    vpnNetworks:     vpnNetworks,
-    MTU:             c.GetInt("tun.mtu", DefaultMTU),
-    l:               l,
+    f:           os.NewFile(uintptr(fd), ""),
+    fd:          fd,
+    Device:      deviceName,
+    vpnNetworks: vpnNetworks,
+    MTU:         c.GetInt("tun.mtu", DefaultMTU),
+    l:           l,
   }

   err = t.reload(c, true)
@@ -87,6 +112,154 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
   return t, nil
 }

+func (t *tun) Close() error {
+  if t.f != nil {
+    if err := t.f.Close(); err != nil {
+      return fmt.Errorf("error closing tun file: %w", err)
+    }
+
+    // t.f.Close should have handled it for us but let's be extra sure
+    _ = unix.Close(t.fd)
+  }
+  return nil
+}
+
+func (t *tun) Read(to []byte) (int, error) {
+  buf := make([]byte, len(to)+4)
+
+  n, err := t.f.Read(buf)
+
+  copy(to, buf[4:])
+  return n - 4, err
+}
+
+// Write is only valid for single threaded use
+func (t *tun) Write(from []byte) (int, error) {
+  buf := t.out
+  if cap(buf) < len(from)+4 {
+    buf = make([]byte, len(from)+4)
+    t.out = buf
+  }
+  buf = buf[:len(from)+4]
+
+  if len(from) == 0 {
+    return 0, syscall.EIO
+  }
+
+  // Determine the IP Family for the NULL L2 Header
+  ipVer := from[0] >> 4
+  if ipVer == 4 {
+    buf[3] = syscall.AF_INET
+  } else if ipVer == 6 {
+    buf[3] = syscall.AF_INET6
+  } else {
+    return 0, fmt.Errorf("unable to determine IP version from packet")
+  }
+
+  copy(buf[4:], from)
+
+  n, err := t.f.Write(buf)
+  return n - 4, err
+}
+
+func (t *tun) addIp(cidr netip.Prefix) error {
+  if cidr.Addr().Is4() {
+    var req ifreqAlias4
+    req.Name = t.deviceBytes()
+    req.Addr = unix.RawSockaddrInet4{
+      Len:    unix.SizeofSockaddrInet4,
+      Family: unix.AF_INET,
+      Addr:   cidr.Addr().As4(),
+    }
+    req.DstAddr = unix.RawSockaddrInet4{
+      Len:    unix.SizeofSockaddrInet4,
+      Family: unix.AF_INET,
+      Addr:   cidr.Addr().As4(),
+    }
+    req.MaskAddr = unix.RawSockaddrInet4{
+      Len:    unix.SizeofSockaddrInet4,
+      Family: unix.AF_INET,
+      Addr:   prefixToMask(cidr).As4(),
+    }
+
+    s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+    if err != nil {
+      return err
+    }
+    defer syscall.Close(s)
+
+    if err := ioctl(uintptr(s), unix.SIOCAIFADDR, uintptr(unsafe.Pointer(&req))); err != nil {
+      return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr(), err)
+    }
+
+    err = addRoute(cidr, t.vpnNetworks)
+    if err != nil {
+      return fmt.Errorf("failed to set route for vpn network %v: %w", cidr, err)
+    }
+
+    return nil
+  }
+
+  if cidr.Addr().Is6() {
+    var req ifreqAlias6
+    req.Name = t.deviceBytes()
+    req.Addr = unix.RawSockaddrInet6{
+      Len:    unix.SizeofSockaddrInet6,
+      Family: unix.AF_INET6,
+      Addr:   cidr.Addr().As16(),
+    }
+    req.PrefixMask = unix.RawSockaddrInet6{
+      Len:    unix.SizeofSockaddrInet6,
+      Family: unix.AF_INET6,
+      Addr:   prefixToMask(cidr).As16(),
+    }
+    req.Lifetime[0] = 0xffffffff
+    req.Lifetime[1] = 0xffffffff
+
+    s, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+    if err != nil {
+      return err
+    }
+    defer syscall.Close(s)
+
+    if err := ioctl(uintptr(s), SIOCAIFADDR_IN6, uintptr(unsafe.Pointer(&req))); err != nil {
+      return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
+    }
+
+    return nil
+  }
+
+  return fmt.Errorf("unknown address type %v", cidr)
+}
+
+func (t *tun) Activate() error {
+  err := t.doIoctlByName(unix.SIOCSIFMTU, uint32(t.MTU))
+  if err != nil {
+    return fmt.Errorf("failed to set tun mtu: %w", err)
+  }
+
+  for i := range t.vpnNetworks {
+    err = t.addIp(t.vpnNetworks[i])
+    if err != nil {
+      return err
+    }
+  }
+
+  return t.addRoutes(false)
+}
+
+func (t *tun) doIoctlByName(ctl uintptr, value uint32) error {
+  s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+  if err != nil {
+    return err
+  }
+  defer syscall.Close(s)
+
+  ir := ifreq{Name: t.deviceBytes(), data: int(value)}
+  err = ioctl(uintptr(s), ctl, uintptr(unsafe.Pointer(&ir)))
+  return err
+}
+
 func (t *tun) reload(c *config.C, initial bool) error {
   change, routes, err := getAllRoutesFromConfig(c, t.vpnNetworks, initial)
   if err != nil {
@@ -124,86 +297,11 @@ func (t *tun) reload(c *config.C, initial bool) error {
   return nil
 }

-func (t *tun) addIp(cidr netip.Prefix) error {
-  var err error
-  // TODO use syscalls instead of exec.Command
-  cmd := exec.Command("/sbin/ifconfig", t.Device, cidr.String(), cidr.Addr().String())
-  t.l.Debug("command: ", cmd.String())
-  if err = cmd.Run(); err != nil {
-    return fmt.Errorf("failed to run 'ifconfig': %s", err)
-  }
-
-  cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
-  t.l.Debug("command: ", cmd.String())
-  if err = cmd.Run(); err != nil {
-    return fmt.Errorf("failed to run 'ifconfig': %s", err)
-  }
-
-  cmd = exec.Command("/sbin/route", "-n", "add", "-inet", cidr.String(), cidr.Addr().String())
-  t.l.Debug("command: ", cmd.String())
-  if err = cmd.Run(); err != nil {
-    return fmt.Errorf("failed to run 'route add': %s", err)
-  }
-
-  // Unsafe path routes
-  return t.addRoutes(false)
-}
-
-func (t *tun) Activate() error {
-  for i := range t.vpnNetworks {
-    err := t.addIp(t.vpnNetworks[i])
-    if err != nil {
-      return err
-    }
-  }
-  return nil
-}
-
 func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
   r, _ := t.routeTree.Load().Lookup(ip)
   return r
 }

-func (t *tun) addRoutes(logErrors bool) error {
-  routes := *t.Routes.Load()
-  for _, r := range routes {
-    if len(r.Via) == 0 || !r.Install {
-      // We don't allow route MTUs so only install routes with a via
-      continue
-    }
-    //TODO: CERT-V2 is this right?
-    cmd := exec.Command("/sbin/route", "-n", "add", "-inet", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
-    t.l.Debug("command: ", cmd.String())
-    if err := cmd.Run(); err != nil {
-      retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]any{"route": r}, err)
-      if logErrors {
-        retErr.Log(t.l)
-      } else {
-        return retErr
-      }
-    }
-  }
-
-  return nil
-}
-
-func (t *tun) removeRoutes(routes []Route) error {
-  for _, r := range routes {
-    if !r.Install {
-      continue
-    }
-    //TODO: CERT-V2 is this right?
-    cmd := exec.Command("/sbin/route", "-n", "delete", "-inet", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
-    t.l.Debug("command: ", cmd.String())
-    if err := cmd.Run(); err != nil {
-      t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
-    } else {
-      t.l.WithField("route", r).Info("Removed route")
-    }
-  }
-  return nil
-}
-
 func (t *tun) Networks() []netip.Prefix {
   return t.vpnNetworks
 }
@@ -213,43 +311,159 @@ func (t *tun) Name() string {
 }

 func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
-  return nil, fmt.Errorf("TODO: multiqueue not implemented for freebsd")
+  return nil, fmt.Errorf("TODO: multiqueue not implemented for openbsd")
 }

-func (t *tun) Read(to []byte) (int, error) {
-  buf := make([]byte, len(to)+4)
-
-  n, err := t.ReadWriteCloser.Read(buf)
-
-  copy(to, buf[4:])
-  return n - 4, err
-}
-
-// Write is only valid for single threaded use
-func (t *tun) Write(from []byte) (int, error) {
-  buf := t.out
-  if cap(buf) < len(from)+4 {
-    buf = make([]byte, len(from)+4)
-    t.out = buf
-  }
-  buf = buf[:len(from)+4]
-
-  if len(from) == 0 {
-    return 0, syscall.EIO
-  }
-
-  // Determine the IP Family for the NULL L2 Header
-  ipVer := from[0] >> 4
-  if ipVer == 4 {
-    buf[3] = syscall.AF_INET
-  } else if ipVer == 6 {
-    buf[3] = syscall.AF_INET6
-  } else {
-    return 0, fmt.Errorf("unable to determine IP version from packet")
-  }
-
-  copy(buf[4:], from)
-
-  n, err := t.ReadWriteCloser.Write(buf)
-  return n - 4, err
-}
+func (t *tun) addRoutes(logErrors bool) error {
+  routes := *t.Routes.Load()
+
+  for _, r := range routes {
+    if len(r.Via) == 0 || !r.Install {
+      // We don't allow route MTUs so only install routes with a via
+      continue
+    }
+
+    err := addRoute(r.Cidr, t.vpnNetworks)
+    if err != nil {
+      retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
+      if logErrors {
+        retErr.Log(t.l)
+      } else {
+        return retErr
+      }
+    } else {
+      t.l.WithField("route", r).Info("Added route")
+    }
+  }
+
+  return nil
+}
+
+func (t *tun) removeRoutes(routes []Route) error {
+  for _, r := range routes {
+    if !r.Install {
+      continue
+    }
+
+    err := delRoute(r.Cidr, t.vpnNetworks)
+    if err != nil {
+      t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
+    } else {
+      t.l.WithField("route", r).Info("Removed route")
+    }
+  }
+  return nil
+}
+
+func (t *tun) deviceBytes() (o [16]byte) {
+  for i, c := range t.Device {
+    o[i] = byte(c)
+  }
+  return
+}
+
+func addRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
+  sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
+  if err != nil {
+    return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
+  }
+  defer unix.Close(sock)
+
+  route := &netroute.RouteMessage{
+    Version: unix.RTM_VERSION,
+    Type:    unix.RTM_ADD,
+    Flags:   unix.RTF_UP | unix.RTF_GATEWAY,
+    Seq:     1,
+  }
+
+  if prefix.Addr().Is4() {
+    gw, err := selectGateway(prefix, gateways)
+    if err != nil {
+      return err
+    }
+    route.Addrs = []netroute.Addr{
+      unix.RTAX_DST:     &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
+      unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
+      unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
+    }
+  } else {
+    gw, err := selectGateway(prefix, gateways)
+    if err != nil {
+      return err
+    }
+    route.Addrs = []netroute.Addr{
+      unix.RTAX_DST:     &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
+      unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
+      unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
+    }
+  }
+
+  data, err := route.Marshal()
+  if err != nil {
+    return fmt.Errorf("failed to create route.RouteMessage: %w", err)
+  }
+
+  _, err = unix.Write(sock, data[:])
+  if err != nil {
+    if errors.Is(err, unix.EEXIST) {
+      // Try to do a change
+      route.Type = unix.RTM_CHANGE
+      data, err = route.Marshal()
+      if err != nil {
+        return fmt.Errorf("failed to create route.RouteMessage for change: %w", err)
+      }
+      _, err = unix.Write(sock, data[:])
+      return err
+    }
+    return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
+  }
+
+  return nil
+}
+
+func delRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
+  sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
+  if err != nil {
+    return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
+  }
+  defer unix.Close(sock)
+
+  route := netroute.RouteMessage{
+    Version: unix.RTM_VERSION,
+    Type:    unix.RTM_DELETE,
+    Seq:     1,
+  }
+
+  if prefix.Addr().Is4() {
+    gw, err := selectGateway(prefix, gateways)
+    if err != nil {
+      return err
+    }
+    route.Addrs = []netroute.Addr{
+      unix.RTAX_DST:     &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
+      unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
+      unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
+    }
+  } else {
+    gw, err := selectGateway(prefix, gateways)
+    if err != nil {
+      return err
+    }
+    route.Addrs = []netroute.Addr{
+      unix.RTAX_DST:     &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
+      unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
+      unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
+    }
+  }
+
+  data, err := route.Marshal()
+  if err != nil {
+    return fmt.Errorf("failed to create route.RouteMessage: %w", err)
+  }
+  _, err = unix.Write(sock, data[:])
+  if err != nil {
+    return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
+  }
+
+  return nil
 }
overlay/wireguard_tun_linux.go (new file, 220 lines)
@@ -0,0 +1,220 @@
+//go:build linux && !android && !e2e_testing
+
+package overlay
+
+import (
+  "fmt"
+  "sync"
+
+  wgtun "github.com/slackhq/nebula/wgstack/tun"
+)
+
+type wireguardTunIO struct {
+  dev       wgtun.Device
+  mtu       int
+  batchSize int
+
+  readMu      sync.Mutex
+  readBuffers [][]byte
+  readLens    []int
+  legacyBuf   []byte
+
+  writeMu      sync.Mutex
+  writeBuf     []byte
+  writeWrap    [][]byte
+  writeBuffers [][]byte
+}
+
+func newWireguardTunIO(dev wgtun.Device, mtu int) *wireguardTunIO {
+  batch := dev.BatchSize()
+  if batch <= 0 {
+    batch = 1
+  }
+  if mtu <= 0 {
+    mtu = DefaultMTU
+  }
+  return &wireguardTunIO{
+    dev:       dev,
+    mtu:       mtu,
+    batchSize: batch,
+    readLens:  make([]int, batch),
+    legacyBuf: make([]byte, wgtun.VirtioNetHdrLen+mtu),
+    writeBuf:  make([]byte, wgtun.VirtioNetHdrLen+mtu),
+    writeWrap: make([][]byte, 1),
+  }
+}
+
+func (w *wireguardTunIO) Read(p []byte) (int, error) {
+  w.readMu.Lock()
+  defer w.readMu.Unlock()
+
+  bufs := w.readBuffers
+  if len(bufs) == 0 {
+    bufs = [][]byte{w.legacyBuf}
+    w.readBuffers = bufs
+  }
+  n, err := w.dev.Read(bufs[:1], w.readLens[:1], wgtun.VirtioNetHdrLen)
+  if err != nil {
+    return 0, err
+  }
+  if n == 0 {
+    return 0, nil
+  }
+  length := w.readLens[0]
+  copy(p, w.legacyBuf[wgtun.VirtioNetHdrLen:wgtun.VirtioNetHdrLen+length])
+  return length, nil
+}
+
+func (w *wireguardTunIO) Write(p []byte) (int, error) {
+  if len(p) > w.mtu {
+    return 0, fmt.Errorf("wireguard tun: payload exceeds MTU (%d > %d)", len(p), w.mtu)
+  }
+  w.writeMu.Lock()
+  defer w.writeMu.Unlock()
+  buf := w.writeBuf[:wgtun.VirtioNetHdrLen+len(p)]
+  for i := 0; i < wgtun.VirtioNetHdrLen; i++ {
+    buf[i] = 0
+  }
+  copy(buf[wgtun.VirtioNetHdrLen:], p)
+  w.writeWrap[0] = buf
+  n, err := w.dev.Write(w.writeWrap, wgtun.VirtioNetHdrLen)
+  if err != nil {
+    return n, err
+  }
+  return len(p), nil
+}
+
+func (w *wireguardTunIO) ReadIntoBatch(pool *PacketPool) ([]*Packet, error) {
+  if pool == nil {
+    return nil, fmt.Errorf("wireguard tun: packet pool is nil")
+  }
+
+  w.readMu.Lock()
+  defer w.readMu.Unlock()
+
+  if len(w.readBuffers) < w.batchSize {
+    w.readBuffers = make([][]byte, w.batchSize)
+  }
+  if len(w.readLens) < w.batchSize {
+    w.readLens = make([]int, w.batchSize)
+  }
+
+  packets := make([]*Packet, w.batchSize)
+  requiredHeadroom := w.BatchHeadroom()
+  requiredPayload := w.BatchPayloadCap()
+  headroom := 0
+  for i := 0; i < w.batchSize; i++ {
+    pkt := pool.Get()
+    if pkt == nil {
+      releasePackets(packets[:i])
+      return nil, fmt.Errorf("wireguard tun: packet pool returned nil packet")
+    }
+    if pkt.Capacity() < requiredPayload {
+      pkt.Release()
+      releasePackets(packets[:i])
+      return nil, fmt.Errorf("wireguard tun: packet capacity %d below required %d", pkt.Capacity(), requiredPayload)
+    }
+    if i == 0 {
+      headroom = pkt.Offset
+      if headroom < requiredHeadroom {
+        pkt.Release()
+        releasePackets(packets[:i])
+        return nil, fmt.Errorf("wireguard tun: packet headroom %d below virtio requirement %d", headroom, requiredHeadroom)
+      }
+    } else if pkt.Offset != headroom {
+      pkt.Release()
+      releasePackets(packets[:i])
+      return nil, fmt.Errorf("wireguard tun: inconsistent packet headroom (%d != %d)", pkt.Offset, headroom)
+    }
+    packets[i] = pkt
+    w.readBuffers[i] = pkt.Buf
+  }
+
+  n, err := w.dev.Read(w.readBuffers[:w.batchSize], w.readLens[:w.batchSize], headroom)
+  if err != nil {
+    releasePackets(packets)
+    return nil, err
+  }
+  if n == 0 {
+    releasePackets(packets)
+    return nil, nil
+  }
+  for i := 0; i < n; i++ {
+    packets[i].Len = w.readLens[i]
+  }
+  for i := n; i < w.batchSize; i++ {
+    packets[i].Release()
+    packets[i] = nil
+  }
+  return packets[:n], nil
+}
+
+func (w *wireguardTunIO) WriteBatch(packets []*Packet) (int, error) {
+  if len(packets) == 0 {
+    return 0, nil
+  }
+  requiredHeadroom := w.BatchHeadroom()
+  offset := packets[0].Offset
+  if offset < requiredHeadroom {
+    releasePackets(packets)
+    return 0, fmt.Errorf("wireguard tun: packet offset %d smaller than required headroom %d", offset, requiredHeadroom)
+  }
+  for _, pkt := range packets {
+    if pkt == nil {
+      continue
+    }
+    if pkt.Offset != offset {
+      releasePackets(packets)
+      return 0, fmt.Errorf("wireguard tun: mixed packet offsets not supported")
+    }
+    limit := pkt.Offset + pkt.Len
+    if limit > len(pkt.Buf) {
+      releasePackets(packets)
+      return 0, fmt.Errorf("wireguard tun: packet length %d exceeds buffer capacity %d", pkt.Len, len(pkt.Buf)-pkt.Offset)
+    }
+  }
+  w.writeMu.Lock()
+  defer w.writeMu.Unlock()
+
+  if len(w.writeBuffers) < len(packets) {
+    w.writeBuffers = make([][]byte, len(packets))
+  }
+  for i, pkt := range packets {
+    if pkt == nil {
+      w.writeBuffers[i] = nil
+      continue
+    }
+    limit := pkt.Offset + pkt.Len
+    w.writeBuffers[i] = pkt.Buf[:limit]
+  }
+  n, err := w.dev.Write(w.writeBuffers[:len(packets)], offset)
+  if err != nil {
+    return n, err
+  }
+  releasePackets(packets)
+  return n, nil
+}
+
+func (w *wireguardTunIO) BatchHeadroom() int {
+  return wgtun.VirtioNetHdrLen
+}
+
+func (w *wireguardTunIO) BatchPayloadCap() int {
+  return w.mtu
+}
+
+func (w *wireguardTunIO) BatchSize() int {
+  return w.batchSize
+}
+
+func (w *wireguardTunIO) Close() error {
+  return nil
+}
+
+func releasePackets(pkts []*Packet) {
+  for _, pkt := range pkts {
+    if pkt != nil {
+      pkt.Release()
+    }
+  }
+}
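ReadIntoBatch/WriteBatch above require every pooled packet to reserve the same headroom in front of its payload so the virtio-net header can be written in place without copying. The sketch below only illustrates that buffer layout with local stand-in types; `PacketPool`, `Packet`, and `wgtun.VirtioNetHdrLen` are this branch's own definitions, and the 10-byte constant here is just a placeholder.

```go
// Illustrative sketch of the headroom layout the batched tun I/O relies on.
package main

import "fmt"

const exampleHeadroom = 10 // stand-in for wgtun.VirtioNetHdrLen

type packet struct {
	Buf    []byte
	Offset int // where the payload starts; must be >= the required headroom
	Len    int // payload length
}

func main() {
	p := packet{Buf: make([]byte, exampleHeadroom+1500), Offset: exampleHeadroom}
	payload := []byte("hello")
	p.Len = copy(p.Buf[p.Offset:], payload)

	// A batched write hands the device Buf[:Offset+Len]; the first Offset
	// bytes stay free for the virtio-net header.
	frame := p.Buf[:p.Offset+p.Len]
	fmt.Println(len(frame), string(frame[p.Offset:]))
}
```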
@@ -180,6 +180,7 @@ func (c *PKClient) DeriveNoise(peerPubKey []byte) ([]byte, error) {
     pkcs11.NewAttribute(pkcs11.CKA_DECRYPT, true),
     pkcs11.NewAttribute(pkcs11.CKA_WRAP, true),
     pkcs11.NewAttribute(pkcs11.CKA_UNWRAP, true),
+    pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, NoiseKeySize),
   }

   // Set up the parameters which include the peer's public key
pki.go (90 lines)
@@ -100,55 +100,62 @@ func (p *PKI) reloadCerts(c *config.C, initial bool) *util.ContextualError {
   currentState := p.cs.Load()
   if newState.v1Cert != nil {
     if currentState.v1Cert == nil {
-      return util.NewContextualError("v1 certificate was added, restart required", nil, err)
-    }
-
-    // did IP in cert change? if so, don't set
-    if !slices.Equal(currentState.v1Cert.Networks(), newState.v1Cert.Networks()) {
-      return util.NewContextualError(
-        "Networks in new cert was different from old",
-        m{"new_networks": newState.v1Cert.Networks(), "old_networks": currentState.v1Cert.Networks()},
-        nil,
-      )
-    }
-
-    if currentState.v1Cert.Curve() != newState.v1Cert.Curve() {
-      return util.NewContextualError(
-        "Curve in new cert was different from old",
-        m{"new_curve": newState.v1Cert.Curve(), "old_curve": currentState.v1Cert.Curve()},
-        nil,
-      )
-    }
-
-  } else if currentState.v1Cert != nil {
-    //TODO: CERT-V2 we should be able to tear this down
-    return util.NewContextualError("v1 certificate was removed, restart required", nil, err)
+      //adding certs is fine, actually. Networks-in-common confirmed in newCertState().
+    } else {
+      // did IP in cert change? if so, don't set
+      if !slices.Equal(currentState.v1Cert.Networks(), newState.v1Cert.Networks()) {
+        return util.NewContextualError(
+          "Networks in new cert was different from old",
+          m{"new_networks": newState.v1Cert.Networks(), "old_networks": currentState.v1Cert.Networks(), "cert_version": cert.Version1},
+          nil,
+        )
+      }
+
+      if currentState.v1Cert.Curve() != newState.v1Cert.Curve() {
+        return util.NewContextualError(
+          "Curve in new v1 cert was different from old",
+          m{"new_curve": newState.v1Cert.Curve(), "old_curve": currentState.v1Cert.Curve(), "cert_version": cert.Version1},
+          nil,
+        )
+      }
+    }
   }

   if newState.v2Cert != nil {
     if currentState.v2Cert == nil {
-      return util.NewContextualError("v2 certificate was added, restart required", nil, err)
-    }
-
-    // did IP in cert change? if so, don't set
-    if !slices.Equal(currentState.v2Cert.Networks(), newState.v2Cert.Networks()) {
-      return util.NewContextualError(
-        "Networks in new cert was different from old",
-        m{"new_networks": newState.v2Cert.Networks(), "old_networks": currentState.v2Cert.Networks()},
-        nil,
-      )
-    }
-
-    if currentState.v2Cert.Curve() != newState.v2Cert.Curve() {
-      return util.NewContextualError(
-        "Curve in new cert was different from old",
-        m{"new_curve": newState.v2Cert.Curve(), "old_curve": currentState.v2Cert.Curve()},
-        nil,
-      )
+      //adding certs is fine, actually
+    } else {
+      // did IP in cert change? if so, don't set
+      if !slices.Equal(currentState.v2Cert.Networks(), newState.v2Cert.Networks()) {
+        return util.NewContextualError(
+          "Networks in new cert was different from old",
+          m{"new_networks": newState.v2Cert.Networks(), "old_networks": currentState.v2Cert.Networks(), "cert_version": cert.Version2},
+          nil,
+        )
+      }
+
+      if currentState.v2Cert.Curve() != newState.v2Cert.Curve() {
+        return util.NewContextualError(
+          "Curve in new cert was different from old",
+          m{"new_curve": newState.v2Cert.Curve(), "old_curve": currentState.v2Cert.Curve(), "cert_version": cert.Version2},
+          nil,
+        )
+      }
     }

   } else if currentState.v2Cert != nil {
-    return util.NewContextualError("v2 certificate was removed, restart required", nil, err)
+    //newState.v1Cert is non-nil bc empty certstates aren't permitted
+    if newState.v1Cert == nil {
+      return util.NewContextualError("v1 and v2 certs are nil, this should be impossible", nil, err)
+    }
+    //if we're going to v1-only, we need to make sure we didn't orphan any v2-cert vpnaddrs
+    if !slices.Equal(currentState.v2Cert.Networks(), newState.v1Cert.Networks()) {
+      return util.NewContextualError(
+        "Removing a V2 cert is not permitted unless it has identical networks to the new V1 cert",
+        m{"new_v1_networks": newState.v1Cert.Networks(), "old_v2_networks": currentState.v2Cert.Networks()},
+        nil,
+      )
+    }
   }

   // Cipher cant be hot swapped so just leave it at what it was before
@@ -173,7 +180,6 @@ func (p *PKI) reloadCerts(c *config.C, initial bool) *util.ContextualError {

   p.cs.Store(newState)

-  //TODO: CERT-V2 newState needs a stringer that does json
   if initial {
     p.l.WithField("cert", newState).Debug("Client nebula certificate(s)")
   } else {
@@ -359,7 +365,9 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
     return nil, util.NewContextualError("v1 and v2 curve are not the same, ignoring", nil, nil)
   }

-  //TODO: CERT-V2 make sure v2 has v1s address
+  if v1.Networks()[0] != v2.Networks()[0] {
+    return nil, util.NewContextualError("v1 and v2 networks are not the same", nil, nil)
+  }

   cs.initiatingVersion = dv
   }
@@ -7,11 +7,11 @@ import (
   "slices"
   "sort"
   "strconv"
+  "sync"
   "sync/atomic"
   "time"

   "github.com/sirupsen/logrus"
-  "github.com/wadey/synctrace"
 )

 // forEachFunc is used to benefit folks that want to do work inside the lock
@@ -185,12 +185,12 @@ func (hr *hostnamesResults) GetAddrs() []netip.AddrPort {
 // It serves as a local cache of query replies, host update notifications, and locally learned addresses
 type RemoteList struct {
   // Every interaction with internals requires a lock!
-  synctrace.RWMutex
+  sync.RWMutex

   // The full list of vpn addresses assigned to this host
   vpnAddrs []netip.Addr

-  // A deduplicated set of addresses. Any accessor should lock beforehand.
+  // A deduplicated set of underlay addresses. Any accessor should lock beforehand.
   addrs []netip.AddrPort

   // A set of relay addresses. VpnIp addresses that the remote identified as relays.
@@ -201,8 +201,10 @@ type RemoteList struct {
   // For learned addresses, this is the vpnIp that sent the packet
   cache map[netip.Addr]*cache

   hr *hostnamesResults
-  shouldAdd func(netip.Addr) bool
+  // shouldAdd is a nillable function that decides if x should be added to addrs.
+  shouldAdd func(vpnAddrs []netip.Addr, x netip.Addr) bool

   // This is a list of remotes that we have tried to handshake with and have returned from the wrong vpn ip.
   // They should not be tried again during a handshake
@@ -213,9 +215,8 @@ type RemoteList struct {
 }

 // NewRemoteList creates a new empty RemoteList
-func NewRemoteList(vpnAddrs []netip.Addr, shouldAdd func(netip.Addr) bool) *RemoteList {
+func NewRemoteList(vpnAddrs []netip.Addr, shouldAdd func([]netip.Addr, netip.Addr) bool) *RemoteList {
   r := &RemoteList{
-    RWMutex:  synctrace.NewRWMutex("remote-list"),
     vpnAddrs: make([]netip.Addr, len(vpnAddrs)),
     addrs:    make([]netip.AddrPort, 0),
     relays:   make([]netip.Addr, 0),
@@ -369,6 +370,15 @@ func (r *RemoteList) CopyBlockedRemotes() []netip.AddrPort {
   return c
 }

+// RefreshFromHandshake locks and updates the RemoteList to account for data learned upon a completed handshake
+func (r *RemoteList) RefreshFromHandshake(vpnAddrs []netip.Addr) {
+  r.Lock()
+  r.badRemotes = nil
+  r.vpnAddrs = make([]netip.Addr, len(vpnAddrs))
+  copy(r.vpnAddrs, vpnAddrs)
+  r.Unlock()
+}
+
 // ResetBlockedRemotes locks and clears the blocked remotes list
 func (r *RemoteList) ResetBlockedRemotes() {
   r.Lock()
@@ -578,7 +588,7 @@ func (r *RemoteList) unlockedCollect() {

   dnsAddrs := r.hr.GetAddrs()
   for _, addr := range dnsAddrs {
-    if r.shouldAdd == nil || r.shouldAdd(addr.Addr()) {
+    if r.shouldAdd == nil || r.shouldAdd(r.vpnAddrs, addr.Addr()) {
       if !r.unlockedIsBad(addr) {
         addrs = append(addrs, addr)
       }
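The `shouldAdd` callback now receives the host's VPN addresses alongside the candidate underlay address. Below is a hedged sketch of a filter matching the new signature; the loopback rule is only an example for illustration, not Nebula's own policy.

```go
// Example filter with the new shouldAdd signature.
package main

import (
	"fmt"
	"net/netip"
)

func shouldAdd(vpnAddrs []netip.Addr, x netip.Addr) bool {
	_ = vpnAddrs // real implementations can use these to scope the decision
	return !x.IsLoopback()
}

func main() {
	fmt.Println(shouldAdd(nil, netip.MustParseAddr("127.0.0.1"))) // false
	fmt.Println(shouldAdd(nil, netip.MustParseAddr("192.0.2.1"))) // true
}
```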
@@ -5,10 +5,10 @@ import (
   "errors"
   "fmt"
   "net"
+  "sync"

   "github.com/armon/go-radix"
   "github.com/sirupsen/logrus"
-  "github.com/wadey/synctrace"
   "golang.org/x/crypto/ssh"
 )

@@ -28,7 +28,7 @@ type SSHServer struct {
   listener net.Listener

   // Locks the conns/counter to avoid concurrent map access
-  connsLock synctrace.Mutex
+  connsLock sync.Mutex
   conns     map[int]*session
   counter   int
 }
@@ -41,7 +41,6 @@ func NewSSHServer(l *logrus.Entry) (*SSHServer, error) {
     l:        l,
     commands: radix.New(),
     conns:    make(map[int]*session),
-    connsLock: synctrace.NewMutex("ssh-server-conns"),
   }

   cc := ssh.CertChecker{
@@ -1,9 +1,8 @@
 package nebula

 import (
+  "sync"
   "time"

-  "github.com/wadey/synctrace"
 )

 // How many timer objects should be cached
@@ -35,7 +34,7 @@ type TimerWheel[T any] struct {
 }

 type LockingTimerWheel[T any] struct {
-  m synctrace.Mutex
+  m sync.Mutex
   t *TimerWheel[T]
 }

@@ -82,9 +81,8 @@ func NewTimerWheel[T any](min, max time.Duration) *TimerWheel[T] {
 }

 // NewLockingTimerWheel is version of TimerWheel that is safe for concurrent use with a small performance penalty
-func NewLockingTimerWheel[T any](name string, min, max time.Duration) *LockingTimerWheel[T] {
+func NewLockingTimerWheel[T any](min, max time.Duration) *LockingTimerWheel[T] {
   return &LockingTimerWheel[T]{
-    m: synctrace.NewMutex(name),
     t: NewTimerWheel[T](min, max),
   }
 }
udp/conn.go (12 lines)
@@ -22,6 +22,18 @@ type Conn interface {
   Close() error
 }

+// Datagram represents a UDP payload destined to a specific address.
+type Datagram struct {
+  Payload []byte
+  Addr    netip.AddrPort
+}
+
+// BatchConn can send multiple datagrams in one syscall.
+type BatchConn interface {
+  Conn
+  WriteBatch(pkts []Datagram) error
+}
+
 type NoopConn struct{}

 func (NoopConn) Rebind() error {
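A caller of the new interface would presumably type-assert the listener and fall back to one write per datagram where batching isn't supported. The sketch below uses local stand-in interfaces that mirror `udp.Conn`'s `WriteTo` and the new `BatchConn`; the exact `Conn` method set is an assumption based on this diff, not quoted from the package.

```go
// Send-path fallback sketch: prefer WriteBatch when available.
package main

import "net/netip"

type Datagram struct {
	Payload []byte
	Addr    netip.AddrPort
}

// conn mirrors the single-datagram write method assumed from udp.Conn.
type conn interface {
	WriteTo(b []byte, addr netip.AddrPort) error
}

// batchConn mirrors the BatchConn addition in this diff.
type batchConn interface {
	conn
	WriteBatch(pkts []Datagram) error
}

func send(c conn, pkts []Datagram) error {
	if bc, ok := c.(batchConn); ok {
		return bc.WriteBatch(pkts) // one syscall for the whole batch where supported
	}
	for _, d := range pkts {
		if err := c.WriteTo(d.Payload, d.Addr); err != nil {
			return err
		}
	}
	return nil
}

func main() {}
```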
udp/errors.go (new file, 5 lines)
@@ -0,0 +1,5 @@
+package udp
+
+import "errors"
+
+var ErrInvalidIPv6RemoteForSocket = errors.New("listener is IPv4, but writing to IPv6 remote")
@@ -3,20 +3,62 @@

 package udp

-// Darwin support is primarily implemented in udp_generic, besides NewListenConfig
-
 import (
+	"context"
+	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"net/netip"
 	"syscall"
+	"unsafe"

 	"github.com/sirupsen/logrus"
+	"github.com/slackhq/nebula/config"
 	"golang.org/x/sys/unix"
 )

+type StdConn struct {
+	*net.UDPConn
+	isV4  bool
+	sysFd uintptr
+	l     *logrus.Logger
+}
+
+var _ Conn = &StdConn{}
+
 func NewListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch int) (Conn, error) {
-	return NewGenericListener(l, ip, port, multi, batch)
+	lc := NewListenConfig(multi)
+	pc, err := lc.ListenPacket(context.TODO(), "udp", net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)))
+	if err != nil {
+		return nil, err
+	}
+
+	if uc, ok := pc.(*net.UDPConn); ok {
+		c := &StdConn{UDPConn: uc, l: l}
+
+		rc, err := uc.SyscallConn()
+		if err != nil {
+			return nil, fmt.Errorf("failed to open udp socket: %w", err)
+		}
+
+		err = rc.Control(func(fd uintptr) {
+			c.sysFd = fd
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to get udp fd: %w", err)
+		}
+
+		la, err := c.LocalAddr()
+		if err != nil {
+			return nil, err
+		}
+		c.isV4 = la.Addr().Is4()
+
+		return c, nil
+	}
+
+	return nil, fmt.Errorf("unexpected PacketConn: %T %#v", pc, pc)
 }

 func NewListenConfig(multi bool) net.ListenConfig {
@@ -43,16 +85,116 @@ func NewListenConfig(multi bool) net.ListenConfig {
 	}
 }

-func (u *GenericConn) Rebind() error {
-	rc, err := u.UDPConn.SyscallConn()
-	if err != nil {
-		return err
+//go:linkname sendto golang.org/x/sys/unix.sendto
+//go:noescape
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen int32) (err error)
+
+func (u *StdConn) WriteTo(b []byte, ap netip.AddrPort) error {
+	var sa unsafe.Pointer
+	var addrLen int32
+
+	if u.isV4 {
+		if ap.Addr().Is6() {
+			return ErrInvalidIPv6RemoteForSocket
+		}
+
+		var rsa unix.RawSockaddrInet6
+		rsa.Family = unix.AF_INET6
+		rsa.Addr = ap.Addr().As16()
+		binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
+		sa = unsafe.Pointer(&rsa)
+		addrLen = syscall.SizeofSockaddrInet4
+	} else {
+		var rsa unix.RawSockaddrInet6
+		rsa.Family = unix.AF_INET6
+		rsa.Addr = ap.Addr().As16()
+		binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
+		sa = unsafe.Pointer(&rsa)
+		addrLen = syscall.SizeofSockaddrInet6
 	}

-	return rc.Control(func(fd uintptr) {
-		err := syscall.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0)
-		if err != nil {
-			u.l.WithError(err).Error("Failed to rebind udp socket")
+	// Golang stdlib doesn't handle EAGAIN correctly in some situations so we do writes ourselves
+	// See https://github.com/golang/go/issues/73919
+	for {
+		//_, _, err := unix.Syscall6(unix.SYS_SENDTO, u.sysFd, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0, sa, addrLen)
+		err := sendto(int(u.sysFd), b, 0, sa, addrLen)
+		if err == nil {
+			// Written, get out before the error handling
+			return nil
 		}
-	})
+
+		if errors.Is(err, syscall.EINTR) {
+			// Write was interrupted, retry
+			continue
+		}
+
+		if errors.Is(err, syscall.EAGAIN) {
+			return &net.OpError{Op: "sendto", Err: unix.EWOULDBLOCK}
+		}
+
+		if errors.Is(err, syscall.EBADF) {
+			return net.ErrClosed
+		}
+
+		return &net.OpError{Op: "sendto", Err: err}
+	}
+}
+
+func (u *StdConn) LocalAddr() (netip.AddrPort, error) {
+	a := u.UDPConn.LocalAddr()
+
+	switch v := a.(type) {
+	case *net.UDPAddr:
+		addr, ok := netip.AddrFromSlice(v.IP)
+		if !ok {
+			return netip.AddrPort{}, fmt.Errorf("LocalAddr returned invalid IP address: %s", v.IP)
+		}
+		return netip.AddrPortFrom(addr, uint16(v.Port)), nil
+
+	default:
+		return netip.AddrPort{}, fmt.Errorf("LocalAddr returned: %#v", a)
+	}
+}
+
+func (u *StdConn) ReloadConfig(c *config.C) {
+	// TODO
+}
+
+func NewUDPStatsEmitter(udpConns []Conn) func() {
+	// No UDP stats for non-linux
+	return func() {}
+}
+
+func (u *StdConn) ListenOut(r EncReader) {
+	buffer := make([]byte, MTU)
+
+	for {
+		// Just read one packet at a time
+		n, rua, err := u.ReadFromUDPAddrPort(buffer)
+		if err != nil {
+			if errors.Is(err, net.ErrClosed) {
+				u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
+				return
+			}
+
+			u.l.WithError(err).Error("unexpected udp socket receive error")
+		}
+
+		r(netip.AddrPortFrom(rua.Addr().Unmap(), rua.Port()), buffer[:n])
+	}
+}
+
+func (u *StdConn) Rebind() error {
+	var err error
+	if u.isV4 {
+		err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IP, syscall.IP_BOUND_IF, 0)
+	} else {
+		err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IPV6, syscall.IPV6_BOUND_IF, 0)
+	}
+
+	if err != nil {
+		u.l.WithError(err).Error("Failed to rebind udp socket")
+	}
+
+	return nil
 }
@@ -1,6 +1,7 @@
-//go:build (!linux || android) && !e2e_testing
+//go:build (!linux || android) && !e2e_testing && !darwin
 // +build !linux android
 // +build !e2e_testing
+// +build !darwin

 // udp_generic implements the nebula UDP interface in pure Go stdlib. This
 // means it can be used on platforms like Darwin and Windows.
@@ -221,7 +221,7 @@ func (u *StdConn) writeTo6(b []byte, ip netip.AddrPort) error {

 func (u *StdConn) writeTo4(b []byte, ip netip.AddrPort) error {
 	if !ip.Addr().Is4() {
-		return fmt.Errorf("Listener is IPv4, but writing to IPv6 remote")
+		return ErrInvalidIPv6RemoteForSocket
 	}

 	var rsa unix.RawSockaddrInet4
@@ -310,31 +310,51 @@ func (u *StdConn) Close() error {
 }

 func NewUDPStatsEmitter(udpConns []Conn) func() {
-	// Check if our kernel supports SO_MEMINFO before registering the gauges
-	var udpGauges [][unix.SK_MEMINFO_VARS]metrics.Gauge
+	if len(udpConns) == 0 {
+		return func() {}
+	}
+
+	type statsProvider struct {
+		index int
+		conn  *StdConn
+	}
+
+	providers := make([]statsProvider, 0, len(udpConns))
+	for i, c := range udpConns {
+		if sc, ok := c.(*StdConn); ok {
+			providers = append(providers, statsProvider{index: i, conn: sc})
+		}
+	}
+
+	if len(providers) == 0 {
+		return func() {}
+	}
+
 	var meminfo [unix.SK_MEMINFO_VARS]uint32
-	if err := udpConns[0].(*StdConn).getMemInfo(&meminfo); err == nil {
-		udpGauges = make([][unix.SK_MEMINFO_VARS]metrics.Gauge, len(udpConns))
-		for i := range udpConns {
-			udpGauges[i] = [unix.SK_MEMINFO_VARS]metrics.Gauge{
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rmem_alloc", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rcvbuf", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_alloc", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.sndbuf", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.fwd_alloc", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_queued", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.optmem", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.backlog", i), nil),
-				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.drops", i), nil),
-			}
+	if err := providers[0].conn.getMemInfo(&meminfo); err != nil {
+		return func() {}
+	}
+
+	udpGauges := make([][unix.SK_MEMINFO_VARS]metrics.Gauge, len(providers))
+	for i, provider := range providers {
+		udpGauges[i] = [unix.SK_MEMINFO_VARS]metrics.Gauge{
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rmem_alloc", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rcvbuf", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_alloc", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.sndbuf", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.fwd_alloc", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_queued", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.optmem", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.backlog", provider.index), nil),
+			metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.drops", provider.index), nil),
 		}
 	}

 	return func() {
-		for i, gauges := range udpGauges {
-			if err := udpConns[i].(*StdConn).getMemInfo(&meminfo); err == nil {
+		for i, provider := range providers {
+			if err := provider.conn.getMemInfo(&meminfo); err == nil {
 				for j := 0; j < unix.SK_MEMINFO_VARS; j++ {
-					gauges[j].Update(int64(meminfo[j]))
+					udpGauges[i][j].Update(int64(meminfo[j]))
 				}
 			}
 		}
 	}
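A short usage sketch (an assumption, not part of the diff; the runUDPStats driver and the 10 second interval are illustrative, and it assumes the time package is imported): the closure returned by NewUDPStatsEmitter carries no scheduling of its own, so the caller is expected to invoke it periodically.

// runUDPStats is a hypothetical driver loop for the stats emitter closure.
func runUDPStats(udpConns []Conn) {
	emit := NewUDPStatsEmitter(udpConns)
	t := time.NewTicker(10 * time.Second)
	defer t.Stop()
	for range t.C {
		emit()
	}
}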
@@ -11,13 +11,13 @@ import (
 	"io"
 	"net"
 	"net/netip"
+	"sync"
 	"sync/atomic"
 	"syscall"
 	"unsafe"

 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/config"
-	"github.com/wadey/synctrace"
 	"golang.org/x/sys/windows"
 	"golang.zx2c4.com/wireguard/conn/winrio"
 )
@@ -46,7 +46,7 @@ type ringBuffer struct {
 	iocp       windows.Handle
 	isFull     bool
 	cq         winrio.Cq
-	mu         synctrace.Mutex
+	mu         sync.Mutex
 	overlapped windows.Overlapped
 }

@@ -64,11 +64,7 @@ func NewRIOListener(l *logrus.Logger, addr netip.Addr, port int) (*RIOConn, erro
 		return nil, errors.New("could not initialize winrio")
 	}

-	u := &RIOConn{
-		l:  l,
-		rx: ringBuffer{mu: synctrace.NewMutex("rio-rx")},
-		tx: ringBuffer{mu: synctrace.NewMutex("rio-tx")},
-	}
+	u := &RIOConn{l: l}

 	err := u.bind(&windows.SockaddrInet6{Addr: addr.As16(), Port: port})
 	if err != nil {
@@ -96,6 +92,25 @@ func (u *RIOConn) bind(sa windows.Sockaddr) error {
 	// Enable v4 for this socket
 	syscall.SetsockoptInt(syscall.Handle(u.sock), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)

+	// Disable reporting of PORT_UNREACHABLE and NET_UNREACHABLE errors from the UDP socket receive call.
+	// These errors are returned on Windows during UDP receives based on the receipt of ICMP packets. Disable
+	// the UDP receive error returns with these ioctl calls.
+	ret := uint32(0)
+	flag := uint32(0)
+	size := uint32(unsafe.Sizeof(flag))
+	err = syscall.WSAIoctl(syscall.Handle(u.sock), syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+	if err != nil {
+		return err
+	}
+	ret = 0
+	flag = 0
+	size = uint32(unsafe.Sizeof(flag))
+	SIO_UDP_NETRESET := uint32(syscall.IOC_IN | syscall.IOC_VENDOR | 15)
+	err = syscall.WSAIoctl(syscall.Handle(u.sock), SIO_UDP_NETRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+	if err != nil {
+		return err
+	}
+
 	err = u.rx.Open()
 	if err != nil {
 		return err
@@ -126,8 +141,12 @@ func (u *RIOConn) ListenOut(r EncReader) {
 		// Just read one packet at a time
 		n, rua, err := u.receive(buffer)
 		if err != nil {
-			u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
-			return
+			if errors.Is(err, net.ErrClosed) {
+				u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
+				return
+			}
+			u.l.WithError(err).Error("unexpected udp socket receive error")
+			continue
 		}

 		r(netip.AddrPortFrom(netip.AddrFrom16(rua.Addr).Unmap(), (rua.Port>>8)|((rua.Port&0xff)<<8)), buffer[:n])
udp/wireguard_conn_linux.go (new file, 225 lines)
@@ -0,0 +1,225 @@
//go:build linux && !android && !e2e_testing

package udp

import (
	"errors"
	"net"
	"net/netip"
	"sync"
	"sync/atomic"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
	wgconn "github.com/slackhq/nebula/wgstack/conn"
)

// WGConn adapts WireGuard's batched UDP bind implementation to Nebula's udp.Conn interface.
type WGConn struct {
	l         *logrus.Logger
	bind      *wgconn.StdNetBind
	recvers   []wgconn.ReceiveFunc
	batch     int
	reqBatch  int
	localIP   netip.Addr
	localPort uint16
	enableGSO bool
	enableGRO bool
	gsoMaxSeg int
	closed    atomic.Bool

	closeOnce sync.Once
}

// NewWireguardListener creates a UDP listener backed by WireGuard's StdNetBind.
func NewWireguardListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch int) (Conn, error) {
	bind := wgconn.NewStdNetBindForAddr(ip, multi)
	recvers, actualPort, err := bind.Open(uint16(port))
	if err != nil {
		return nil, err
	}
	if batch <= 0 {
		batch = bind.BatchSize()
	} else if batch > bind.BatchSize() {
		batch = bind.BatchSize()
	}
	return &WGConn{
		l:         l,
		bind:      bind,
		recvers:   recvers,
		batch:     batch,
		reqBatch:  batch,
		localIP:   ip,
		localPort: actualPort,
	}, nil
}

func (c *WGConn) Rebind() error {
	// WireGuard's bind does not support rebinding in place.
	return nil
}

func (c *WGConn) LocalAddr() (netip.AddrPort, error) {
	if !c.localIP.IsValid() || c.localIP.IsUnspecified() {
		// Fallback to wildcard IPv4 for display purposes.
		return netip.AddrPortFrom(netip.IPv4Unspecified(), c.localPort), nil
	}
	return netip.AddrPortFrom(c.localIP, c.localPort), nil
}

func (c *WGConn) listen(fn wgconn.ReceiveFunc, r EncReader) {
	batchSize := c.batch
	packets := make([][]byte, batchSize)
	for i := range packets {
		packets[i] = make([]byte, MTU)
	}
	sizes := make([]int, batchSize)
	endpoints := make([]wgconn.Endpoint, batchSize)

	for {
		if c.closed.Load() {
			return
		}
		n, err := fn(packets, sizes, endpoints)
		if err != nil {
			if errors.Is(err, net.ErrClosed) {
				return
			}
			if c.l != nil {
				c.l.WithError(err).Debug("wireguard UDP listener receive error")
			}
			continue
		}
		for i := 0; i < n; i++ {
			if sizes[i] == 0 {
				continue
			}
			stdEp, ok := endpoints[i].(*wgconn.StdNetEndpoint)
			if !ok {
				if c.l != nil {
					c.l.Warn("wireguard UDP listener received unexpected endpoint type")
				}
				continue
			}
			addr := stdEp.AddrPort
			r(addr, packets[i][:sizes[i]])
			endpoints[i] = nil
		}
	}
}

func (c *WGConn) ListenOut(r EncReader) {
	for _, fn := range c.recvers {
		go c.listen(fn, r)
	}
}

func (c *WGConn) WriteTo(b []byte, addr netip.AddrPort) error {
	if len(b) == 0 {
		return nil
	}
	if c.closed.Load() {
		return net.ErrClosed
	}
	ep := &wgconn.StdNetEndpoint{AddrPort: addr}
	return c.bind.Send([][]byte{b}, ep)
}

func (c *WGConn) WriteBatch(datagrams []Datagram) error {
	if len(datagrams) == 0 {
		return nil
	}
	if c.closed.Load() {
		return net.ErrClosed
	}
	max := c.batch
	if max <= 0 {
		max = len(datagrams)
		if max == 0 {
			max = 1
		}
	}
	bufs := make([][]byte, 0, max)
	var (
		current  netip.AddrPort
		endpoint *wgconn.StdNetEndpoint
		haveAddr bool
	)
	flush := func() error {
		if len(bufs) == 0 || endpoint == nil {
			bufs = bufs[:0]
			return nil
		}
		err := c.bind.Send(bufs, endpoint)
		bufs = bufs[:0]
		return err
	}

	for _, d := range datagrams {
		if len(d.Payload) == 0 || !d.Addr.IsValid() {
			continue
		}
		if !haveAddr || d.Addr != current {
			if err := flush(); err != nil {
				return err
			}
			current = d.Addr
			endpoint = &wgconn.StdNetEndpoint{AddrPort: current}
			haveAddr = true
		}
		bufs = append(bufs, d.Payload)
		if len(bufs) >= max {
			if err := flush(); err != nil {
				return err
			}
		}
	}
	return flush()
}

func (c *WGConn) ConfigureOffload(enableGSO, enableGRO bool, maxSegments int) {
	c.enableGSO = enableGSO
	c.enableGRO = enableGRO
	if maxSegments <= 0 {
		maxSegments = 1
	} else if maxSegments > wgconn.IdealBatchSize {
		maxSegments = wgconn.IdealBatchSize
	}
	c.gsoMaxSeg = maxSegments

	effectiveBatch := c.reqBatch
	if enableGSO && c.bind != nil {
		bindBatch := c.bind.BatchSize()
		if effectiveBatch < bindBatch {
			if c.l != nil {
				c.l.WithFields(logrus.Fields{
					"requested": c.reqBatch,
					"effective": bindBatch,
				}).Warn("listen.batch below wireguard minimum; using bind batch size for UDP GSO support")
			}
			effectiveBatch = bindBatch
		}
	}
	c.batch = effectiveBatch

	if c.l != nil {
		c.l.WithFields(logrus.Fields{
			"enableGSO":      enableGSO,
			"enableGRO":      enableGRO,
			"gsoMaxSegments": maxSegments,
		}).Debug("configured wireguard UDP offload")
	}
}

func (c *WGConn) ReloadConfig(*config.C) {
	// WireGuard bind currently does not expose runtime configuration knobs.
}

func (c *WGConn) Close() error {
	var err error
	c.closeOnce.Do(func() {
		c.closed.Store(true)
		err = c.bind.Close()
	})
	return err
}
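A hedged end-to-end sketch of wiring this listener up (everything outside the diff is hypothetical: the exampleWireguardListener function, the handlePacket callback, the port, and the peer address; it also assumes EncReader is a func(netip.AddrPort, []byte) callback, as the calls above suggest): ListenOut fans out one goroutine per ReceiveFunc, and WriteBatch flushes runs of datagrams that share a destination.

func exampleWireguardListener(l *logrus.Logger, handlePacket func(netip.AddrPort, []byte)) error {
	// Listen on all addresses, port 4242, no SO_REUSEPORT, default batch size.
	c, err := NewWireguardListener(l, netip.IPv4Unspecified(), 4242, false, 0)
	if err != nil {
		return err
	}
	defer c.Close()

	// Receive side: the callback gets the remote address and the payload slice.
	c.ListenOut(func(from netip.AddrPort, payload []byte) {
		handlePacket(from, payload)
	})

	// Send side: both datagrams target the same peer, so they flush in one Send call.
	peer := netip.MustParseAddrPort("192.0.2.1:4242")
	return c.(BatchConn).WriteBatch([]Datagram{
		{Payload: []byte("one"), Addr: peer},
		{Payload: []byte("two"), Addr: peer},
	})
}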
udp/wireguard_conn_unsupported.go (new file, 15 lines)
@@ -0,0 +1,15 @@
//go:build !linux || android || e2e_testing

package udp

import (
	"fmt"
	"net/netip"

	"github.com/sirupsen/logrus"
)

// NewWireguardListener is only available on Linux builds.
func NewWireguardListener(*logrus.Logger, netip.Addr, int, bool, int) (Conn, error) {
	return nil, fmt.Errorf("wireguard experimental UDP listener is only supported on Linux")
}
539
wgstack/conn/bind_std.go
Normal file
539
wgstack/conn/bind_std.go
Normal file
@@ -0,0 +1,539 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
//
|
||||||
|
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
|
||||||
|
|
||||||
|
package conn
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/net/ipv4"
|
||||||
|
"golang.org/x/net/ipv6"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ Bind = (*StdNetBind)(nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// StdNetBind implements Bind for all platforms. While Windows has its own Bind
|
||||||
|
// (see bind_windows.go), it may fall back to StdNetBind.
|
||||||
|
// TODO: Remove usage of ipv{4,6}.PacketConn when net.UDPConn has comparable
|
||||||
|
// methods for sending and receiving multiple datagrams per-syscall. See the
|
||||||
|
// proposal in https://github.com/golang/go/issues/45886#issuecomment-1218301564.
|
||||||
|
type StdNetBind struct {
|
||||||
|
mu sync.Mutex // protects all fields except as specified
|
||||||
|
ipv4 *net.UDPConn
|
||||||
|
ipv6 *net.UDPConn
|
||||||
|
ipv4PC *ipv4.PacketConn // will be nil on non-Linux
|
||||||
|
ipv6PC *ipv6.PacketConn // will be nil on non-Linux
|
||||||
|
|
||||||
|
// these three fields are not guarded by mu
|
||||||
|
udpAddrPool sync.Pool
|
||||||
|
ipv4MsgsPool sync.Pool
|
||||||
|
ipv6MsgsPool sync.Pool
|
||||||
|
|
||||||
|
blackhole4 bool
|
||||||
|
blackhole6 bool
|
||||||
|
|
||||||
|
listenAddr4 string
|
||||||
|
listenAddr6 string
|
||||||
|
bindV4 bool
|
||||||
|
bindV6 bool
|
||||||
|
reusePort bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStdNetBind() *StdNetBind {
|
||||||
|
return &StdNetBind{
|
||||||
|
udpAddrPool: sync.Pool{
|
||||||
|
New: func() any {
|
||||||
|
return &net.UDPAddr{
|
||||||
|
IP: make([]byte, 16),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
ipv4MsgsPool: sync.Pool{
|
||||||
|
New: func() any {
|
||||||
|
msgs := make([]ipv4.Message, IdealBatchSize)
|
||||||
|
for i := range msgs {
|
||||||
|
msgs[i].Buffers = make(net.Buffers, 1)
|
||||||
|
msgs[i].OOB = make([]byte, srcControlSize)
|
||||||
|
}
|
||||||
|
return &msgs
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
ipv6MsgsPool: sync.Pool{
|
||||||
|
New: func() any {
|
||||||
|
msgs := make([]ipv6.Message, IdealBatchSize)
|
||||||
|
for i := range msgs {
|
||||||
|
msgs[i].Buffers = make(net.Buffers, 1)
|
||||||
|
msgs[i].OOB = make([]byte, srcControlSize)
|
||||||
|
}
|
||||||
|
return &msgs
|
||||||
|
},
|
||||||
|
},
|
||||||
|
bindV4: true,
|
||||||
|
bindV6: true,
|
||||||
|
reusePort: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStdNetBind creates a bind that listens on all interfaces.
|
||||||
|
func NewStdNetBind() *StdNetBind {
|
||||||
|
return newStdNetBind()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStdNetBindForAddr creates a bind that listens on a specific address.
|
||||||
|
// If addr is IPv4, only the IPv4 socket will be created. For IPv6, only the
|
||||||
|
// IPv6 socket will be created.
|
||||||
|
func NewStdNetBindForAddr(addr netip.Addr, reusePort bool) *StdNetBind {
|
||||||
|
b := newStdNetBind()
|
||||||
|
if addr.IsValid() {
|
||||||
|
if addr.IsUnspecified() {
|
||||||
|
// keep dual-stack defaults with empty listen addresses
|
||||||
|
} else if addr.Is4() {
|
||||||
|
b.listenAddr4 = addr.Unmap().String()
|
||||||
|
b.bindV4 = true
|
||||||
|
b.bindV6 = false
|
||||||
|
} else {
|
||||||
|
b.listenAddr6 = addr.Unmap().String()
|
||||||
|
b.bindV6 = true
|
||||||
|
b.bindV4 = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.reusePort = reusePort
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
type StdNetEndpoint struct {
|
||||||
|
// AddrPort is the endpoint destination.
|
||||||
|
netip.AddrPort
|
||||||
|
// src is the current sticky source address and interface index, if supported.
|
||||||
|
src struct {
|
||||||
|
netip.Addr
|
||||||
|
ifidx int32
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ Bind = (*StdNetBind)(nil)
|
||||||
|
_ Endpoint = &StdNetEndpoint{}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (*StdNetBind) ParseEndpoint(s string) (Endpoint, error) {
|
||||||
|
e, err := netip.ParseAddrPort(s)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &StdNetEndpoint{
|
||||||
|
AddrPort: e,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) ClearSrc() {
|
||||||
|
e.src.ifidx = 0
|
||||||
|
e.src.Addr = netip.Addr{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) DstIP() netip.Addr {
|
||||||
|
return e.AddrPort.Addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) SrcIP() netip.Addr {
|
||||||
|
return e.src.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) SrcIfidx() int32 {
|
||||||
|
return e.src.ifidx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) DstToBytes() []byte {
|
||||||
|
b, _ := e.AddrPort.MarshalBinary()
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) DstToString() string {
|
||||||
|
return e.AddrPort.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StdNetEndpoint) SrcToString() string {
|
||||||
|
return e.src.Addr.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) listenNet(network string, host string, port int) (*net.UDPConn, int, error) {
|
||||||
|
lc := listenConfig()
|
||||||
|
if s.reusePort {
|
||||||
|
base := lc.Control
|
||||||
|
lc.Control = func(network, address string, c syscall.RawConn) error {
|
||||||
|
if base != nil {
|
||||||
|
if err := base(network, address, c); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c.Control(func(fd uintptr) {
|
||||||
|
_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := ":" + strconv.Itoa(port)
|
||||||
|
if host != "" {
|
||||||
|
addr = net.JoinHostPort(host, strconv.Itoa(port))
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err := lc.ListenPacket(context.Background(), network, addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve port.
|
||||||
|
laddr := conn.LocalAddr()
|
||||||
|
uaddr, err := net.ResolveUDPAddr(
|
||||||
|
laddr.Network(),
|
||||||
|
laddr.String(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
return conn.(*net.UDPConn), uaddr.Port, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) openIPv4(port int) (*net.UDPConn, *ipv4.PacketConn, int, error) {
|
||||||
|
if !s.bindV4 {
|
||||||
|
return nil, nil, port, nil
|
||||||
|
}
|
||||||
|
host := s.listenAddr4
|
||||||
|
conn, actualPort, err := s.listenNet("udp4", host, port)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, syscall.EAFNOSUPPORT) {
|
||||||
|
return nil, nil, port, nil
|
||||||
|
}
|
||||||
|
return nil, nil, port, err
|
||||||
|
}
|
||||||
|
if runtime.GOOS != "linux" {
|
||||||
|
return conn, nil, actualPort, nil
|
||||||
|
}
|
||||||
|
pc := ipv4.NewPacketConn(conn)
|
||||||
|
return conn, pc, actualPort, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) openIPv6(port int) (*net.UDPConn, *ipv6.PacketConn, int, error) {
|
||||||
|
if !s.bindV6 {
|
||||||
|
return nil, nil, port, nil
|
||||||
|
}
|
||||||
|
host := s.listenAddr6
|
||||||
|
conn, actualPort, err := s.listenNet("udp6", host, port)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, syscall.EAFNOSUPPORT) {
|
||||||
|
return nil, nil, port, nil
|
||||||
|
}
|
||||||
|
return nil, nil, port, err
|
||||||
|
}
|
||||||
|
if runtime.GOOS != "linux" {
|
||||||
|
return conn, nil, actualPort, nil
|
||||||
|
}
|
||||||
|
pc := ipv6.NewPacketConn(conn)
|
||||||
|
return conn, pc, actualPort, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) Open(uport uint16) ([]ReceiveFunc, uint16, error) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var tries int
|
||||||
|
|
||||||
|
if s.ipv4 != nil || s.ipv6 != nil {
|
||||||
|
return nil, 0, ErrBindAlreadyOpen
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to open ipv4 and ipv6 listeners on the same port.
|
||||||
|
// If uport is 0, we can retry on failure.
|
||||||
|
again:
|
||||||
|
port := int(uport)
|
||||||
|
var v4conn *net.UDPConn
|
||||||
|
var v6conn *net.UDPConn
|
||||||
|
var v4pc *ipv4.PacketConn
|
||||||
|
var v6pc *ipv6.PacketConn
|
||||||
|
|
||||||
|
v4conn, v4pc, port, err = s.openIPv4(port)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listen on the same port as we're using for ipv4.
|
||||||
|
v6conn, v6pc, port, err = s.openIPv6(port)
|
||||||
|
if uport == 0 && errors.Is(err, syscall.EADDRINUSE) && tries < 100 {
|
||||||
|
if v4conn != nil {
|
||||||
|
v4conn.Close()
|
||||||
|
}
|
||||||
|
tries++
|
||||||
|
goto again
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if v4conn != nil {
|
||||||
|
v4conn.Close()
|
||||||
|
}
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var fns []ReceiveFunc
|
||||||
|
if v4conn != nil {
|
||||||
|
s.ipv4 = v4conn
|
||||||
|
if v4pc != nil {
|
||||||
|
s.ipv4PC = v4pc
|
||||||
|
}
|
||||||
|
fns = append(fns, s.makeReceiveIPv4(v4pc, v4conn))
|
||||||
|
}
|
||||||
|
if v6conn != nil {
|
||||||
|
s.ipv6 = v6conn
|
||||||
|
if v6pc != nil {
|
||||||
|
s.ipv6PC = v6pc
|
||||||
|
}
|
||||||
|
fns = append(fns, s.makeReceiveIPv6(v6pc, v6conn))
|
||||||
|
}
|
||||||
|
if len(fns) == 0 {
|
||||||
|
return nil, 0, syscall.EAFNOSUPPORT
|
||||||
|
}
|
||||||
|
|
||||||
|
return fns, uint16(port), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) makeReceiveIPv4(pc *ipv4.PacketConn, conn *net.UDPConn) ReceiveFunc {
|
||||||
|
return func(bufs [][]byte, sizes []int, eps []Endpoint) (n int, err error) {
|
||||||
|
msgs := s.ipv4MsgsPool.Get().(*[]ipv4.Message)
|
||||||
|
defer s.ipv4MsgsPool.Put(msgs)
|
||||||
|
for i := range bufs {
|
||||||
|
(*msgs)[i].Buffers[0] = bufs[i]
|
||||||
|
}
|
||||||
|
var numMsgs int
|
||||||
|
if runtime.GOOS == "linux" && pc != nil {
|
||||||
|
numMsgs, err = pc.ReadBatch(*msgs, 0)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
msg := &(*msgs)[0]
|
||||||
|
msg.N, msg.NN, _, msg.Addr, err = conn.ReadMsgUDP(msg.Buffers[0], msg.OOB)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
numMsgs = 1
|
||||||
|
}
|
||||||
|
for i := 0; i < numMsgs; i++ {
|
||||||
|
msg := &(*msgs)[i]
|
||||||
|
sizes[i] = msg.N
|
||||||
|
addrPort := msg.Addr.(*net.UDPAddr).AddrPort()
|
||||||
|
ep := &StdNetEndpoint{AddrPort: addrPort} // TODO: remove allocation
|
||||||
|
getSrcFromControl(msg.OOB[:msg.NN], ep)
|
||||||
|
eps[i] = ep
|
||||||
|
}
|
||||||
|
return numMsgs, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) makeReceiveIPv6(pc *ipv6.PacketConn, conn *net.UDPConn) ReceiveFunc {
|
||||||
|
return func(bufs [][]byte, sizes []int, eps []Endpoint) (n int, err error) {
|
||||||
|
msgs := s.ipv6MsgsPool.Get().(*[]ipv6.Message)
|
||||||
|
defer s.ipv6MsgsPool.Put(msgs)
|
||||||
|
for i := range bufs {
|
||||||
|
(*msgs)[i].Buffers[0] = bufs[i]
|
||||||
|
}
|
||||||
|
var numMsgs int
|
||||||
|
if runtime.GOOS == "linux" && pc != nil {
|
||||||
|
numMsgs, err = pc.ReadBatch(*msgs, 0)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
msg := &(*msgs)[0]
|
||||||
|
msg.N, msg.NN, _, msg.Addr, err = conn.ReadMsgUDP(msg.Buffers[0], msg.OOB)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
numMsgs = 1
|
||||||
|
}
|
||||||
|
for i := 0; i < numMsgs; i++ {
|
||||||
|
msg := &(*msgs)[i]
|
||||||
|
sizes[i] = msg.N
|
||||||
|
addrPort := msg.Addr.(*net.UDPAddr).AddrPort()
|
||||||
|
ep := &StdNetEndpoint{AddrPort: addrPort} // TODO: remove allocation
|
||||||
|
getSrcFromControl(msg.OOB[:msg.NN], ep)
|
||||||
|
eps[i] = ep
|
||||||
|
}
|
||||||
|
return numMsgs, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: When all Binds handle IdealBatchSize, remove this dynamic function and
|
||||||
|
// rename the IdealBatchSize constant to BatchSize.
|
||||||
|
func (s *StdNetBind) BatchSize() int {
|
||||||
|
if runtime.GOOS == "linux" {
|
||||||
|
return IdealBatchSize
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) Close() error {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
var err1, err2 error
|
||||||
|
if s.ipv4 != nil {
|
||||||
|
err1 = s.ipv4.Close()
|
||||||
|
s.ipv4 = nil
|
||||||
|
s.ipv4PC = nil
|
||||||
|
}
|
||||||
|
if s.ipv6 != nil {
|
||||||
|
err2 = s.ipv6.Close()
|
||||||
|
s.ipv6 = nil
|
||||||
|
s.ipv6PC = nil
|
||||||
|
}
|
||||||
|
s.blackhole4 = false
|
||||||
|
s.blackhole6 = false
|
||||||
|
if err1 != nil {
|
||||||
|
return err1
|
||||||
|
}
|
||||||
|
return err2
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) Send(bufs [][]byte, endpoint Endpoint) error {
|
||||||
|
s.mu.Lock()
|
||||||
|
blackhole := s.blackhole4
|
||||||
|
conn := s.ipv4
|
||||||
|
var (
|
||||||
|
pc4 *ipv4.PacketConn
|
||||||
|
pc6 *ipv6.PacketConn
|
||||||
|
)
|
||||||
|
is6 := false
|
||||||
|
if endpoint.DstIP().Is6() {
|
||||||
|
blackhole = s.blackhole6
|
||||||
|
conn = s.ipv6
|
||||||
|
pc6 = s.ipv6PC
|
||||||
|
is6 = true
|
||||||
|
} else {
|
||||||
|
pc4 = s.ipv4PC
|
||||||
|
}
|
||||||
|
s.mu.Unlock()
|
||||||
|
|
||||||
|
if blackhole {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if conn == nil {
|
||||||
|
return syscall.EAFNOSUPPORT
|
||||||
|
}
|
||||||
|
if is6 {
|
||||||
|
return s.send6(conn, pc6, endpoint, bufs)
|
||||||
|
} else {
|
||||||
|
return s.send4(conn, pc4, endpoint, bufs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) send4(conn *net.UDPConn, pc *ipv4.PacketConn, ep Endpoint, bufs [][]byte) error {
|
||||||
|
ua := s.udpAddrPool.Get().(*net.UDPAddr)
|
||||||
|
as4 := ep.DstIP().As4()
|
||||||
|
copy(ua.IP, as4[:])
|
||||||
|
ua.IP = ua.IP[:4]
|
||||||
|
ua.Port = int(ep.(*StdNetEndpoint).Port())
|
||||||
|
msgs := s.ipv4MsgsPool.Get().(*[]ipv4.Message)
|
||||||
|
for i, buf := range bufs {
|
||||||
|
(*msgs)[i].Buffers[0] = buf
|
||||||
|
(*msgs)[i].Addr = ua
|
||||||
|
setSrcControl(&(*msgs)[i].OOB, ep.(*StdNetEndpoint))
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
n int
|
||||||
|
err error
|
||||||
|
start int
|
||||||
|
)
|
||||||
|
if runtime.GOOS == "linux" && pc != nil {
|
||||||
|
for {
|
||||||
|
n, err = pc.WriteBatch((*msgs)[start:len(bufs)], 0)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, syscall.EAFNOSUPPORT) {
|
||||||
|
for j := start; j < len(bufs); j++ {
|
||||||
|
_, _, werr := conn.WriteMsgUDP(bufs[j], (*msgs)[j].OOB, ua)
|
||||||
|
if werr != nil {
|
||||||
|
err = werr
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if n == len((*msgs)[start:len(bufs)]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
start += n
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i, buf := range bufs {
|
||||||
|
_, _, err = conn.WriteMsgUDP(buf, (*msgs)[i].OOB, ua)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.udpAddrPool.Put(ua)
|
||||||
|
s.ipv4MsgsPool.Put(msgs)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StdNetBind) send6(conn *net.UDPConn, pc *ipv6.PacketConn, ep Endpoint, bufs [][]byte) error {
|
||||||
|
ua := s.udpAddrPool.Get().(*net.UDPAddr)
|
||||||
|
as16 := ep.DstIP().As16()
|
||||||
|
copy(ua.IP, as16[:])
|
||||||
|
ua.IP = ua.IP[:16]
|
||||||
|
ua.Port = int(ep.(*StdNetEndpoint).Port())
|
||||||
|
msgs := s.ipv6MsgsPool.Get().(*[]ipv6.Message)
|
||||||
|
for i, buf := range bufs {
|
||||||
|
(*msgs)[i].Buffers[0] = buf
|
||||||
|
(*msgs)[i].Addr = ua
|
||||||
|
setSrcControl(&(*msgs)[i].OOB, ep.(*StdNetEndpoint))
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
n int
|
||||||
|
err error
|
||||||
|
start int
|
||||||
|
)
|
||||||
|
if runtime.GOOS == "linux" && pc != nil {
|
||||||
|
for {
|
||||||
|
n, err = pc.WriteBatch((*msgs)[start:len(bufs)], 0)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, syscall.EAFNOSUPPORT) {
|
||||||
|
for j := start; j < len(bufs); j++ {
|
||||||
|
_, _, werr := conn.WriteMsgUDP(bufs[j], (*msgs)[j].OOB, ua)
|
||||||
|
if werr != nil {
|
||||||
|
err = werr
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if n == len((*msgs)[start:len(bufs)]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
start += n
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i, buf := range bufs {
|
||||||
|
_, _, err = conn.WriteMsgUDP(buf, (*msgs)[i].OOB, ua)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.udpAddrPool.Put(ua)
|
||||||
|
s.ipv6MsgsPool.Put(msgs)
|
||||||
|
return err
|
||||||
|
}
|
||||||
wgstack/conn/conn.go (new file, 131 lines)
@@ -0,0 +1,131 @@
// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package conn

import (
	"errors"
	"fmt"
	"net/netip"
	"reflect"
	"runtime"
	"strings"
)

const (
	IdealBatchSize = 128 // maximum number of packets handled per read and write
)

// A ReceiveFunc receives at least one packet from the network and writes them
// into packets. On a successful read it returns the number of elements of
// sizes, packets, and endpoints that should be evaluated. Some elements of
// sizes may be zero, and callers should ignore them. Callers must pass a sizes
// and eps slice with a length greater than or equal to the length of packets.
// These lengths must not exceed the length of the associated Bind.BatchSize().
type ReceiveFunc func(packets [][]byte, sizes []int, eps []Endpoint) (n int, err error)

// A Bind listens on a port for both IPv6 and IPv4 UDP traffic.
//
// A Bind interface may also be a PeekLookAtSocketFd or BindSocketToInterface,
// depending on the platform-specific implementation.
type Bind interface {
	// Open puts the Bind into a listening state on a given port and reports the actual
	// port that it bound to. Passing zero results in a random selection.
	// fns is the set of functions that will be called to receive packets.
	Open(port uint16) (fns []ReceiveFunc, actualPort uint16, err error)

	// Close closes the Bind listener.
	// All fns returned by Open must return net.ErrClosed after a call to Close.
	Close() error

	// SetMark sets the mark for each packet sent through this Bind.
	// This mark is passed to the kernel as the socket option SO_MARK.
	SetMark(mark uint32) error

	// Send writes one or more packets in bufs to address ep. The length of
	// bufs must not exceed BatchSize().
	Send(bufs [][]byte, ep Endpoint) error

	// ParseEndpoint creates a new endpoint from a string.
	ParseEndpoint(s string) (Endpoint, error)

	// BatchSize is the number of buffers expected to be passed to
	// the ReceiveFuncs, and the maximum expected to be passed to SendBatch.
	BatchSize() int
}

// BindSocketToInterface is implemented by Bind objects that support being
// tied to a single network interface. Used by wireguard-windows.
type BindSocketToInterface interface {
	BindSocketToInterface4(interfaceIndex uint32, blackhole bool) error
	BindSocketToInterface6(interfaceIndex uint32, blackhole bool) error
}

// PeekLookAtSocketFd is implemented by Bind objects that support having their
// file descriptor peeked at. Used by wireguard-android.
type PeekLookAtSocketFd interface {
	PeekLookAtSocketFd4() (fd int, err error)
	PeekLookAtSocketFd6() (fd int, err error)
}

// An Endpoint maintains the source/destination caching for a peer.
//
// dst: the remote address of a peer ("endpoint" in uapi terminology)
// src: the local address from which datagrams originate going to the peer
type Endpoint interface {
	ClearSrc()           // clears the source address
	SrcToString() string // returns the local source address (ip:port)
	DstToString() string // returns the destination address (ip:port)
	DstToBytes() []byte  // used for mac2 cookie calculations
	DstIP() netip.Addr
	SrcIP() netip.Addr
}

var (
	ErrBindAlreadyOpen   = errors.New("bind is already open")
	ErrWrongEndpointType = errors.New("endpoint type does not correspond with bind type")
)

func (fn ReceiveFunc) PrettyName() string {
	name := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
	// 0. cheese/taco.beansIPv6.func12.func21218-fm
	name = strings.TrimSuffix(name, "-fm")
	// 1. cheese/taco.beansIPv6.func12.func21218
	if idx := strings.LastIndexByte(name, '/'); idx != -1 {
		name = name[idx+1:]
		// 2. taco.beansIPv6.func12.func21218
	}
	for {
		var idx int
		for idx = len(name) - 1; idx >= 0; idx-- {
			if name[idx] < '0' || name[idx] > '9' {
				break
			}
		}
		if idx == len(name)-1 {
			break
		}
		const dotFunc = ".func"
		if !strings.HasSuffix(name[:idx+1], dotFunc) {
			break
		}
		name = name[:idx+1-len(dotFunc)]
		// 3. taco.beansIPv6.func12
		// 4. taco.beansIPv6
	}
	if idx := strings.LastIndexByte(name, '.'); idx != -1 {
		name = name[idx+1:]
		// 5. beansIPv6
	}
	if name == "" {
		return fmt.Sprintf("%p", fn)
	}
	if strings.HasSuffix(name, "IPv4") {
		return "v4"
	}
	if strings.HasSuffix(name, "IPv6") {
		return "v6"
	}
	return name
}
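A minimal sketch of the receive contract documented above (an assumption, not part of the diff; receiveLoop, its handle callback, and the 1500-byte buffer size are illustrative): the caller sizes its slices from BatchSize() and loops over a ReceiveFunc until it reports an error, which will be net.ErrClosed after Close.

// receiveLoop drains one ReceiveFunc according to the Bind/ReceiveFunc contract.
func receiveLoop(b Bind, fn ReceiveFunc, handle func(Endpoint, []byte)) {
	n := b.BatchSize()
	packets := make([][]byte, n)
	for i := range packets {
		packets[i] = make([]byte, 1500)
	}
	sizes := make([]int, n)
	eps := make([]Endpoint, n)
	for {
		count, err := fn(packets, sizes, eps)
		if err != nil {
			return // net.ErrClosed is expected after Bind.Close()
		}
		for i := 0; i < count; i++ {
			if sizes[i] == 0 {
				continue // zero-sized entries are ignored per the contract
			}
			handle(eps[i], packets[i][:sizes[i]])
		}
	}
}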
wgstack/conn/controlfns.go (new file, 42 lines)
@@ -0,0 +1,42 @@
// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package conn

import (
	"net"
	"syscall"
)

// UDP socket read/write buffer size (7MB). The value of 7MB is chosen as it is
// the max supported by a default configuration of macOS. Some platforms will
// silently clamp the value to other maximums, such as linux clamping to
// net.core.{r,w}mem_max (see _linux.go for additional implementation that works
// around this limitation)
const socketBufferSize = 7 << 20

// controlFn is the callback function signature from net.ListenConfig.Control.
// It is used to apply platform specific configuration to the socket prior to
// bind.
type controlFn func(network, address string, c syscall.RawConn) error

// controlFns is a list of functions that are called from the listen config
// that can apply socket options.
var controlFns = []controlFn{}

// listenConfig returns a net.ListenConfig that applies the controlFns to the
// socket prior to bind. This is used to apply socket buffer sizing and packet
// information OOB configuration for sticky sockets.
func listenConfig() *net.ListenConfig {
	return &net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			for _, fn := range controlFns {
				if err := fn(network, address, c); err != nil {
					return err
				}
			}
			return nil
		},
	}
}
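For illustration only: the platform files in this package (for example controlfns_linux.go, next) contribute their socket options by appending to controlFns from an init function, along these lines. The empty hook below is a sketch, not code from the diff.

// Sketch of how a platform file registers an extra socket option hook.
func init() {
	controlFns = append(controlFns, func(network, address string, c syscall.RawConn) error {
		return c.Control(func(fd uintptr) {
			// Platform-specific setsockopt calls would go here.
		})
	})
}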
wgstack/conn/controlfns_linux.go (new file, 62 lines)
@@ -0,0 +1,62 @@
//go:build linux

// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package conn

import (
	"fmt"
	"runtime"
	"syscall"

	"golang.org/x/sys/unix"
)

func init() {
	controlFns = append(controlFns,

		// Attempt to set the socket buffer size beyond net.core.{r,w}mem_max by
		// using SO_*BUFFORCE. This requires CAP_NET_ADMIN, and is allowed here to
		// fail silently - the result of failure is lower performance on very fast
		// links or high latency links.
		func(network, address string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				// Set up to *mem_max
				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RCVBUF, socketBufferSize)
				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_SNDBUF, socketBufferSize)
				// Set beyond *mem_max if CAP_NET_ADMIN
				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, socketBufferSize)
				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, socketBufferSize)
			})
		},

		// Enable receiving of the packet information (IP_PKTINFO for IPv4,
		// IPV6_PKTINFO for IPv6) that is used to implement sticky socket support.
		func(network, address string, c syscall.RawConn) error {
			var err error
			switch network {
			case "udp4":
				if runtime.GOOS != "android" {
					c.Control(func(fd uintptr) {
						err = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_PKTINFO, 1)
					})
				}
			case "udp6":
				c.Control(func(fd uintptr) {
					if runtime.GOOS != "android" {
						err = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_RECVPKTINFO, 1)
						if err != nil {
							return
						}
					}
					err = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_V6ONLY, 1)
				})
			default:
				err = fmt.Errorf("unhandled network: %s: %w", network, unix.EINVAL)
			}
			return err
		},
	)
}
wgstack/conn/default.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//go:build !windows

// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package conn

func NewDefaultBind() Bind { return NewStdNetBind() }
wgstack/conn/errors_default.go (new file, 12 lines)
@@ -0,0 +1,12 @@
//go:build !linux

/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

func errShouldDisableUDPGSO(err error) bool {
	return false
}
wgstack/conn/errors_linux.go (new file, 26 lines)
@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

import (
	"errors"
	"os"

	"golang.org/x/sys/unix"
)

func errShouldDisableUDPGSO(err error) bool {
	var serr *os.SyscallError
	if errors.As(err, &serr) {
		// EIO is returned by udp_send_skb() if the device driver does not have
		// tx checksumming enabled, which is a hard requirement of UDP_SEGMENT.
		// See:
		// https://git.kernel.org/pub/scm/docs/man-pages/man-pages.git/tree/man7/udp.7?id=806eabd74910447f21005160e90957bde4db0183#n228
		// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/ipv4/udp.c?h=v6.2&id=c9c3395d5e3dcc6daee66c6908354d47bf98cb0c#n942
		return serr.Err == unix.EIO
	}
	return false
}
wgstack/conn/features_default.go (new file, 15 lines)
@@ -0,0 +1,15 @@
//go:build !linux
// +build !linux

/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

import "net"

func supportsUDPOffload(conn *net.UDPConn) (txOffload, rxOffload bool) {
	return
}
wgstack/conn/features_linux.go (new file, 29 lines)
@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

import (
	"net"

	"golang.org/x/sys/unix"
)

func supportsUDPOffload(conn *net.UDPConn) (txOffload, rxOffload bool) {
	rc, err := conn.SyscallConn()
	if err != nil {
		return
	}
	err = rc.Control(func(fd uintptr) {
		_, errSyscall := unix.GetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_SEGMENT)
		txOffload = errSyscall == nil
		opt, errSyscall := unix.GetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_GRO)
		rxOffload = errSyscall == nil && opt == 1
	})
	if err != nil {
		return false, false
	}
	return txOffload, rxOffload
}
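A small usage sketch (assumption, not from the diff; gateOffload and the surrounding wiring are hypothetical): the probe above only reports whether UDP_SEGMENT and UDP_GRO are available on a socket; the caller decides what to enable based on the result.

// gateOffload is a hypothetical caller that checks offload support on a socket.
func gateOffload(udpConn *net.UDPConn) (useGSO, useGRO bool) {
	tx, rx := supportsUDPOffload(udpConn)
	// A caller might enable segmented writes only when tx is true, and
	// GRO-aware reads only when rx is true.
	return tx, rx
}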
wgstack/conn/gso_default.go (new file, 21 lines)
@@ -0,0 +1,21 @@
//go:build !linux

/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

// getGSOSize parses control for UDP_GRO and if found returns its GSO size data.
func getGSOSize(control []byte) (int, error) {
	return 0, nil
}

// setGSOSize sets a UDP_SEGMENT in control based on gsoSize.
func setGSOSize(control *[]byte, gsoSize uint16) {
}

// gsoControlSize returns the recommended buffer size for pooling sticky and UDP
// offloading control data.
const gsoControlSize = 0
wgstack/conn/gso_linux.go (new file, 65 lines)
@@ -0,0 +1,65 @@
//go:build linux

/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

const (
	sizeOfGSOData = 2
)

// getGSOSize parses control for UDP_GRO and if found returns its GSO size data.
func getGSOSize(control []byte) (int, error) {
	var (
		hdr  unix.Cmsghdr
		data []byte
		rem  = control
		err  error
	)

	for len(rem) > unix.SizeofCmsghdr {
		hdr, data, rem, err = unix.ParseOneSocketControlMessage(rem)
		if err != nil {
			return 0, fmt.Errorf("error parsing socket control message: %w", err)
		}
		if hdr.Level == unix.SOL_UDP && hdr.Type == unix.UDP_GRO && len(data) >= sizeOfGSOData {
			var gso uint16
			copy(unsafe.Slice((*byte)(unsafe.Pointer(&gso)), sizeOfGSOData), data[:sizeOfGSOData])
			return int(gso), nil
		}
	}
	return 0, nil
}

// setGSOSize sets a UDP_SEGMENT in control based on gsoSize. It leaves existing
// data in control untouched.
func setGSOSize(control *[]byte, gsoSize uint16) {
	existingLen := len(*control)
	avail := cap(*control) - existingLen
	space := unix.CmsgSpace(sizeOfGSOData)
	if avail < space {
		return
	}
	*control = (*control)[:cap(*control)]
	gsoControl := (*control)[existingLen:]
	hdr := (*unix.Cmsghdr)(unsafe.Pointer(&(gsoControl)[0]))
	hdr.Level = unix.SOL_UDP
	hdr.Type = unix.UDP_SEGMENT
	hdr.SetLen(unix.CmsgLen(sizeOfGSOData))
	copy((gsoControl)[unix.CmsgLen(0):], unsafe.Slice((*byte)(unsafe.Pointer(&gsoSize)), sizeOfGSOData))
	*control = (*control)[:existingLen+space]
}

// gsoControlSize returns the recommended buffer size for pooling UDP
// offloading control data.
var gsoControlSize = unix.CmsgSpace(sizeOfGSOData)
wgstack/conn/mark_unix.go (new file, 64 lines)
@@ -0,0 +1,64 @@
//go:build linux || openbsd || freebsd

// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package conn

import (
	"runtime"

	"golang.org/x/sys/unix"
)

var fwmarkIoctl int

func init() {
	switch runtime.GOOS {
	case "linux", "android":
		fwmarkIoctl = 36 /* unix.SO_MARK */
	case "freebsd":
		fwmarkIoctl = 0x1015 /* unix.SO_USER_COOKIE */
	case "openbsd":
		fwmarkIoctl = 0x1021 /* unix.SO_RTABLE */
	}
}

func (s *StdNetBind) SetMark(mark uint32) error {
	var operr error
	if fwmarkIoctl == 0 {
		return nil
	}
	if s.ipv4 != nil {
		fd, err := s.ipv4.SyscallConn()
		if err != nil {
			return err
		}
		err = fd.Control(func(fd uintptr) {
			operr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, fwmarkIoctl, int(mark))
		})
		if err == nil {
			err = operr
		}
		if err != nil {
			return err
		}
	}
	if s.ipv6 != nil {
		fd, err := s.ipv6.SyscallConn()
		if err != nil {
			return err
		}
		err = fd.Control(func(fd uintptr) {
			operr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, fwmarkIoctl, int(mark))
		})
		if err == nil {
			err = operr
		}
		if err != nil {
			return err
		}
	}
	return nil
}
42
wgstack/conn/sticky_default.go
Normal file
@@ -0,0 +1,42 @@
//go:build !linux || android

/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package conn

import "net/netip"

func (e *StdNetEndpoint) SrcIP() netip.Addr {
    return netip.Addr{}
}

func (e *StdNetEndpoint) SrcIfidx() int32 {
    return 0
}

func (e *StdNetEndpoint) SrcToString() string {
    return ""
}

// TODO: macOS, FreeBSD and other BSDs likely do support the sticky sockets
// {get,set}srcControl feature set, but use alternatively named flags and need
// ports and require testing.

// getSrcFromControl parses the control for PKTINFO and if found updates ep with
// the source information found.
func getSrcFromControl(control []byte, ep *StdNetEndpoint) {
}

// setSrcControl parses the control for PKTINFO and if found updates ep with
// the source information found.
func setSrcControl(control *[]byte, ep *StdNetEndpoint) {
}

// stickyControlSize returns the recommended buffer size for pooling sticky
// offloading control data.
const stickyControlSize = 0

const StdNetSupportsStickySockets = false
116
wgstack/conn/sticky_linux.go
Normal file
@@ -0,0 +1,116 @@
//go:build linux && !android

// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package conn

import (
    "net/netip"
    "unsafe"

    "golang.org/x/sys/unix"
)

// getSrcFromControl parses the control for PKTINFO and if found updates ep with
// the source information found.
func getSrcFromControl(control []byte, ep *StdNetEndpoint) {
    ep.ClearSrc()

    var (
        hdr  unix.Cmsghdr
        data []byte
        rem  []byte = control
        err  error
    )

    for len(rem) > unix.SizeofCmsghdr {
        hdr, data, rem, err = unix.ParseOneSocketControlMessage(rem)
        if err != nil {
            return
        }

        if hdr.Level == unix.IPPROTO_IP &&
            hdr.Type == unix.IP_PKTINFO {

            info := pktInfoFromBuf[unix.Inet4Pktinfo](data)
            ep.src.Addr = netip.AddrFrom4(info.Spec_dst)
            ep.src.ifidx = info.Ifindex

            return
        }

        if hdr.Level == unix.IPPROTO_IPV6 &&
            hdr.Type == unix.IPV6_PKTINFO {

            info := pktInfoFromBuf[unix.Inet6Pktinfo](data)
            ep.src.Addr = netip.AddrFrom16(info.Addr)
            ep.src.ifidx = int32(info.Ifindex)

            return
        }
    }
}

// pktInfoFromBuf returns type T populated from the provided buf via copy(). It
// panics if buf is of insufficient size.
func pktInfoFromBuf[T unix.Inet4Pktinfo | unix.Inet6Pktinfo](buf []byte) (t T) {
    size := int(unsafe.Sizeof(t))
    if len(buf) < size {
        panic("pktInfoFromBuf: buffer too small")
    }
    copy(unsafe.Slice((*byte)(unsafe.Pointer(&t)), size), buf)
    return t
}

// setSrcControl sets an IP{V6}_PKTINFO in control based on the source address
// and source ifindex found in ep. control's len will be set to 0 in the event
// that ep is a default value.
func setSrcControl(control *[]byte, ep *StdNetEndpoint) {
    *control = (*control)[:cap(*control)]
    if len(*control) < int(unsafe.Sizeof(unix.Cmsghdr{})) {
        *control = (*control)[:0]
        return
    }

    if ep.src.ifidx == 0 && !ep.SrcIP().IsValid() {
        *control = (*control)[:0]
        return
    }

    if len(*control) < srcControlSize {
        *control = (*control)[:0]
        return
    }

    hdr := (*unix.Cmsghdr)(unsafe.Pointer(&(*control)[0]))
    if ep.SrcIP().Is4() {
        hdr.Level = unix.IPPROTO_IP
        hdr.Type = unix.IP_PKTINFO
        hdr.SetLen(unix.CmsgLen(unix.SizeofInet4Pktinfo))

        info := (*unix.Inet4Pktinfo)(unsafe.Pointer(&(*control)[unix.SizeofCmsghdr]))
        info.Ifindex = ep.src.ifidx
        if ep.SrcIP().IsValid() {
            info.Spec_dst = ep.SrcIP().As4()
        }
        *control = (*control)[:unix.CmsgSpace(unix.SizeofInet4Pktinfo)]
    } else {
        hdr.Level = unix.IPPROTO_IPV6
        hdr.Type = unix.IPV6_PKTINFO
        hdr.SetLen(unix.CmsgLen(unix.SizeofInet6Pktinfo))

        info := (*unix.Inet6Pktinfo)(unsafe.Pointer(&(*control)[unix.SizeofCmsghdr]))
        info.Ifindex = uint32(ep.src.ifidx)
        if ep.SrcIP().IsValid() {
            info.Addr = ep.SrcIP().As16()
        }
        *control = (*control)[:unix.CmsgSpace(unix.SizeofInet6Pktinfo)]
    }

}

var srcControlSize = unix.CmsgSpace(unix.SizeofInet6Pktinfo)

const StdNetSupportsStickySockets = true
42
wgstack/tun/checksum.go
Normal file
@@ -0,0 +1,42 @@
package tun

import "encoding/binary"

// TODO: Explore SIMD and/or other assembly optimizations.
func checksumNoFold(b []byte, initial uint64) uint64 {
    ac := initial
    i := 0
    n := len(b)
    for n >= 4 {
        ac += uint64(binary.BigEndian.Uint32(b[i : i+4]))
        n -= 4
        i += 4
    }
    for n >= 2 {
        ac += uint64(binary.BigEndian.Uint16(b[i : i+2]))
        n -= 2
        i += 2
    }
    if n == 1 {
        ac += uint64(b[i]) << 8
    }
    return ac
}

func checksum(b []byte, initial uint64) uint16 {
    ac := checksumNoFold(b, initial)
    ac = (ac >> 16) + (ac & 0xffff)
    ac = (ac >> 16) + (ac & 0xffff)
    ac = (ac >> 16) + (ac & 0xffff)
    ac = (ac >> 16) + (ac & 0xffff)
    return uint16(ac)
}

func pseudoHeaderChecksumNoFold(protocol uint8, srcAddr, dstAddr []byte, totalLen uint16) uint64 {
    sum := checksumNoFold(srcAddr, 0)
    sum = checksumNoFold(dstAddr, sum)
    sum = checksumNoFold([]byte{0, protocol}, sum)
    tmp := make([]byte, 2)
    binary.BigEndian.PutUint16(tmp, totalLen)
    return checksumNoFold(tmp, sum)
}
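checksum/checksumNoFold implement the standard Internet (RFC 1071) ones'-complement sum: a header summed together with its own stored complement folds to 0xffff, which is why tcpChecksumValid later tests `^checksum(...) == 0`. Below is a minimal standalone sketch of that property; the local fold helper mirrors the unexported functions above (they cannot be imported), and the header bytes are arbitrary example values.

```go
package main

import (
    "encoding/binary"
    "fmt"
)

// fold mirrors checksumNoFold+checksum above: sum 16-bit big-endian words,
// then fold carries back into 16 bits.
func fold(b []byte, initial uint64) uint16 {
    ac := initial
    for len(b) >= 2 {
        ac += uint64(binary.BigEndian.Uint16(b[:2]))
        b = b[2:]
    }
    if len(b) == 1 {
        ac += uint64(b[0]) << 8
    }
    for ac>>16 != 0 {
        ac = (ac >> 16) + (ac & 0xffff)
    }
    return uint16(ac)
}

func main() {
    // A fake 20-byte IPv4-style header with the checksum field (bytes 10-11) zeroed.
    hdr := make([]byte, 20)
    copy(hdr, []byte{0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00, 0x40, 0x06})
    binary.BigEndian.PutUint32(hdr[12:], 0xc0a80001) // example src 192.168.0.1
    binary.BigEndian.PutUint32(hdr[16:], 0xc0a800c7) // example dst 192.168.0.199

    csum := ^fold(hdr, 0) // value to store in the header
    binary.BigEndian.PutUint16(hdr[10:], csum)
    fmt.Printf("stored %#04x, verify ^fold == 0: %v\n", csum, ^fold(hdr, 0) == 0)
}
```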
3
wgstack/tun/export.go
Normal file
@@ -0,0 +1,3 @@
package tun

const VirtioNetHdrLen = virtioNetHdrLen
630
wgstack/tun/tcp_offload_linux.go
Normal file
@@ -0,0 +1,630 @@
//go:build linux

// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package tun

import (
    "bytes"
    "encoding/binary"
    "errors"
    "io"
    "unsafe"

    wgconn "github.com/slackhq/nebula/wgstack/conn"
    "golang.org/x/sys/unix"
)

var ErrTooManySegments = errors.New("tun: too many segments for TSO")

const tcpFlagsOffset = 13

const (
    tcpFlagFIN uint8 = 0x01
    tcpFlagPSH uint8 = 0x08
    tcpFlagACK uint8 = 0x10
)

// virtioNetHdr is defined in the kernel in include/uapi/linux/virtio_net.h. The
// kernel symbol is virtio_net_hdr.
type virtioNetHdr struct {
    flags      uint8
    gsoType    uint8
    hdrLen     uint16
    gsoSize    uint16
    csumStart  uint16
    csumOffset uint16
}

func (v *virtioNetHdr) decode(b []byte) error {
    if len(b) < virtioNetHdrLen {
        return io.ErrShortBuffer
    }
    copy(unsafe.Slice((*byte)(unsafe.Pointer(v)), virtioNetHdrLen), b[:virtioNetHdrLen])
    return nil
}

func (v *virtioNetHdr) encode(b []byte) error {
    if len(b) < virtioNetHdrLen {
        return io.ErrShortBuffer
    }
    copy(b[:virtioNetHdrLen], unsafe.Slice((*byte)(unsafe.Pointer(v)), virtioNetHdrLen))
    return nil
}

const (
    // virtioNetHdrLen is the length in bytes of virtioNetHdr. This matches the
    // shape of the C ABI for its kernel counterpart -- sizeof(virtio_net_hdr).
    virtioNetHdrLen = int(unsafe.Sizeof(virtioNetHdr{}))
)

// flowKey represents the key for a flow.
type flowKey struct {
    srcAddr, dstAddr [16]byte
    srcPort, dstPort uint16
    rxAck            uint32 // varying ack values should not be coalesced. Treat them as separate flows.
}

// tcpGROTable holds flow and coalescing information for the purposes of GRO.
type tcpGROTable struct {
    itemsByFlow map[flowKey][]tcpGROItem
    itemsPool   [][]tcpGROItem
}

func newTCPGROTable() *tcpGROTable {
    t := &tcpGROTable{
        itemsByFlow: make(map[flowKey][]tcpGROItem, wgconn.IdealBatchSize),
        itemsPool:   make([][]tcpGROItem, wgconn.IdealBatchSize),
    }
    for i := range t.itemsPool {
        t.itemsPool[i] = make([]tcpGROItem, 0, wgconn.IdealBatchSize)
    }
    return t
}

func newFlowKey(pkt []byte, srcAddr, dstAddr, tcphOffset int) flowKey {
    key := flowKey{}
    addrSize := dstAddr - srcAddr
    copy(key.srcAddr[:], pkt[srcAddr:dstAddr])
    copy(key.dstAddr[:], pkt[dstAddr:dstAddr+addrSize])
    key.srcPort = binary.BigEndian.Uint16(pkt[tcphOffset:])
    key.dstPort = binary.BigEndian.Uint16(pkt[tcphOffset+2:])
    key.rxAck = binary.BigEndian.Uint32(pkt[tcphOffset+8:])
    return key
}

// lookupOrInsert looks up a flow for the provided packet and metadata,
// returning the packets found for the flow, or inserting a new one if none
// is found.
func (t *tcpGROTable) lookupOrInsert(pkt []byte, srcAddrOffset, dstAddrOffset, tcphOffset, tcphLen, bufsIndex int) ([]tcpGROItem, bool) {
    key := newFlowKey(pkt, srcAddrOffset, dstAddrOffset, tcphOffset)
    items, ok := t.itemsByFlow[key]
    if ok {
        return items, ok
    }
    // TODO: insert() performs another map lookup. This could be rearranged to avoid.
    t.insert(pkt, srcAddrOffset, dstAddrOffset, tcphOffset, tcphLen, bufsIndex)
    return nil, false
}

// insert an item in the table for the provided packet and packet metadata.
func (t *tcpGROTable) insert(pkt []byte, srcAddrOffset, dstAddrOffset, tcphOffset, tcphLen, bufsIndex int) {
    key := newFlowKey(pkt, srcAddrOffset, dstAddrOffset, tcphOffset)
    item := tcpGROItem{
        key:       key,
        bufsIndex: uint16(bufsIndex),
        gsoSize:   uint16(len(pkt[tcphOffset+tcphLen:])),
        iphLen:    uint8(tcphOffset),
        tcphLen:   uint8(tcphLen),
        sentSeq:   binary.BigEndian.Uint32(pkt[tcphOffset+4:]),
        pshSet:    pkt[tcphOffset+tcpFlagsOffset]&tcpFlagPSH != 0,
    }
    items, ok := t.itemsByFlow[key]
    if !ok {
        items = t.newItems()
    }
    items = append(items, item)
    t.itemsByFlow[key] = items
}

func (t *tcpGROTable) updateAt(item tcpGROItem, i int) {
    items, _ := t.itemsByFlow[item.key]
    items[i] = item
}

func (t *tcpGROTable) deleteAt(key flowKey, i int) {
    items, _ := t.itemsByFlow[key]
    items = append(items[:i], items[i+1:]...)
    t.itemsByFlow[key] = items
}

// tcpGROItem represents bookkeeping data for a TCP packet during the lifetime
// of a GRO evaluation across a vector of packets.
type tcpGROItem struct {
    key       flowKey
    sentSeq   uint32 // the sequence number
    bufsIndex uint16 // the index into the original bufs slice
    numMerged uint16 // the number of packets merged into this item
    gsoSize   uint16 // payload size
    iphLen    uint8  // ip header len
    tcphLen   uint8  // tcp header len
    pshSet    bool   // psh flag is set
}

func (t *tcpGROTable) newItems() []tcpGROItem {
    var items []tcpGROItem
    items, t.itemsPool = t.itemsPool[len(t.itemsPool)-1], t.itemsPool[:len(t.itemsPool)-1]
    return items
}

func (t *tcpGROTable) reset() {
    for k, items := range t.itemsByFlow {
        items = items[:0]
        t.itemsPool = append(t.itemsPool, items)
        delete(t.itemsByFlow, k)
    }
}

// canCoalesce represents the outcome of checking if two TCP packets are
// candidates for coalescing.
type canCoalesce int

const (
    coalescePrepend     canCoalesce = -1
    coalesceUnavailable canCoalesce = 0
    coalesceAppend      canCoalesce = 1
)

// tcpPacketsCanCoalesce evaluates if pkt can be coalesced with the packet
// described by item. This function makes considerations that match the kernel's
// GRO self tests, which can be found in tools/testing/selftests/net/gro.c.
func tcpPacketsCanCoalesce(pkt []byte, iphLen, tcphLen uint8, seq uint32, pshSet bool, gsoSize uint16, item tcpGROItem, bufs [][]byte, bufsOffset int) canCoalesce {
    pktTarget := bufs[item.bufsIndex][bufsOffset:]
    if tcphLen != item.tcphLen {
        // cannot coalesce with unequal tcp options len
        return coalesceUnavailable
    }
    if tcphLen > 20 {
        if !bytes.Equal(pkt[iphLen+20:iphLen+tcphLen], pktTarget[item.iphLen+20:iphLen+tcphLen]) {
            // cannot coalesce with unequal tcp options
            return coalesceUnavailable
        }
    }
    if pkt[0]>>4 == 6 {
        if pkt[0] != pktTarget[0] || pkt[1]>>4 != pktTarget[1]>>4 {
            // cannot coalesce with unequal Traffic class values
            return coalesceUnavailable
        }
        if pkt[7] != pktTarget[7] {
            // cannot coalesce with unequal Hop limit values
            return coalesceUnavailable
        }
    } else {
        if pkt[1] != pktTarget[1] {
            // cannot coalesce with unequal ToS values
            return coalesceUnavailable
        }
        if pkt[6]>>5 != pktTarget[6]>>5 {
            // cannot coalesce with unequal DF or reserved bits. MF is checked
            // further up the stack.
            return coalesceUnavailable
        }
        if pkt[8] != pktTarget[8] {
            // cannot coalesce with unequal TTL values
            return coalesceUnavailable
        }
    }
    // seq adjacency
    lhsLen := item.gsoSize
    lhsLen += item.numMerged * item.gsoSize
    if seq == item.sentSeq+uint32(lhsLen) { // pkt aligns following item from a seq num perspective
        if item.pshSet {
            // We cannot append to a segment that has the PSH flag set, PSH
            // can only be set on the final segment in a reassembled group.
            return coalesceUnavailable
        }
        if len(pktTarget[iphLen+tcphLen:])%int(item.gsoSize) != 0 {
            // A smaller than gsoSize packet has been appended previously.
            // Nothing can come after a smaller packet on the end.
            return coalesceUnavailable
        }
        if gsoSize > item.gsoSize {
            // We cannot have a larger packet following a smaller one.
            return coalesceUnavailable
        }
        return coalesceAppend
    } else if seq+uint32(gsoSize) == item.sentSeq { // pkt aligns in front of item from a seq num perspective
        if pshSet {
            // We cannot prepend with a segment that has the PSH flag set, PSH
            // can only be set on the final segment in a reassembled group.
            return coalesceUnavailable
        }
        if gsoSize < item.gsoSize {
            // We cannot have a larger packet following a smaller one.
            return coalesceUnavailable
        }
        if gsoSize > item.gsoSize && item.numMerged > 0 {
            // There's at least one previous merge, and we're larger than all
            // previous. This would put multiple smaller packets on the end.
            return coalesceUnavailable
        }
        return coalescePrepend
    }
    return coalesceUnavailable
}

func tcpChecksumValid(pkt []byte, iphLen uint8, isV6 bool) bool {
    srcAddrAt := ipv4SrcAddrOffset
    addrSize := 4
    if isV6 {
        srcAddrAt = ipv6SrcAddrOffset
        addrSize = 16
    }
    tcpTotalLen := uint16(len(pkt) - int(iphLen))
    tcpCSumNoFold := pseudoHeaderChecksumNoFold(unix.IPPROTO_TCP, pkt[srcAddrAt:srcAddrAt+addrSize], pkt[srcAddrAt+addrSize:srcAddrAt+addrSize*2], tcpTotalLen)
    return ^checksum(pkt[iphLen:], tcpCSumNoFold) == 0
}

// coalesceResult represents the result of attempting to coalesce two TCP
// packets.
type coalesceResult int

const (
    coalesceInsufficientCap coalesceResult = 0
    coalescePSHEnding       coalesceResult = 1
    coalesceItemInvalidCSum coalesceResult = 2
    coalescePktInvalidCSum  coalesceResult = 3
    coalesceSuccess         coalesceResult = 4
)

// coalesceTCPPackets attempts to coalesce pkt with the packet described by
// item, returning the outcome. This function may swap bufs elements in the
// event of a prepend as item's bufs index is already being tracked for writing
// to a Device.
func coalesceTCPPackets(mode canCoalesce, pkt []byte, pktBuffsIndex int, gsoSize uint16, seq uint32, pshSet bool, item *tcpGROItem, bufs [][]byte, bufsOffset int, isV6 bool) coalesceResult {
    var pktHead []byte // the packet that will end up at the front
    headersLen := item.iphLen + item.tcphLen
    coalescedLen := len(bufs[item.bufsIndex][bufsOffset:]) + len(pkt) - int(headersLen)

    // Copy data
    if mode == coalescePrepend {
        pktHead = pkt
        if cap(pkt)-bufsOffset < coalescedLen {
            // We don't want to allocate a new underlying array if capacity is
            // too small.
            return coalesceInsufficientCap
        }
        if pshSet {
            return coalescePSHEnding
        }
        if item.numMerged == 0 {
            if !tcpChecksumValid(bufs[item.bufsIndex][bufsOffset:], item.iphLen, isV6) {
                return coalesceItemInvalidCSum
            }
        }
        if !tcpChecksumValid(pkt, item.iphLen, isV6) {
            return coalescePktInvalidCSum
        }
        item.sentSeq = seq
        extendBy := coalescedLen - len(pktHead)
        bufs[pktBuffsIndex] = append(bufs[pktBuffsIndex], make([]byte, extendBy)...)
        copy(bufs[pktBuffsIndex][bufsOffset+len(pkt):], bufs[item.bufsIndex][bufsOffset+int(headersLen):])
        // Flip the slice headers in bufs as part of prepend. The index of item
        // is already being tracked for writing.
        bufs[item.bufsIndex], bufs[pktBuffsIndex] = bufs[pktBuffsIndex], bufs[item.bufsIndex]
    } else {
        pktHead = bufs[item.bufsIndex][bufsOffset:]
        if cap(pktHead)-bufsOffset < coalescedLen {
            // We don't want to allocate a new underlying array if capacity is
            // too small.
            return coalesceInsufficientCap
        }
        if item.numMerged == 0 {
            if !tcpChecksumValid(bufs[item.bufsIndex][bufsOffset:], item.iphLen, isV6) {
                return coalesceItemInvalidCSum
            }
        }
        if !tcpChecksumValid(pkt, item.iphLen, isV6) {
            return coalescePktInvalidCSum
        }
        if pshSet {
            // We are appending a segment with PSH set.
            item.pshSet = pshSet
            pktHead[item.iphLen+tcpFlagsOffset] |= tcpFlagPSH
        }
        extendBy := len(pkt) - int(headersLen)
        bufs[item.bufsIndex] = append(bufs[item.bufsIndex], make([]byte, extendBy)...)
        copy(bufs[item.bufsIndex][bufsOffset+len(pktHead):], pkt[headersLen:])
    }

    if gsoSize > item.gsoSize {
        item.gsoSize = gsoSize
    }
    hdr := virtioNetHdr{
        flags:      unix.VIRTIO_NET_HDR_F_NEEDS_CSUM, // this turns into CHECKSUM_PARTIAL in the skb
        hdrLen:     uint16(headersLen),
        gsoSize:    uint16(item.gsoSize),
        csumStart:  uint16(item.iphLen),
        csumOffset: 16,
    }

    // Recalculate the total len (IPv4) or payload len (IPv6). Recalculate the
    // (IPv4) header checksum.
    if isV6 {
        hdr.gsoType = unix.VIRTIO_NET_HDR_GSO_TCPV6
        binary.BigEndian.PutUint16(pktHead[4:], uint16(coalescedLen)-uint16(item.iphLen)) // set new payload len
    } else {
        hdr.gsoType = unix.VIRTIO_NET_HDR_GSO_TCPV4
        pktHead[10], pktHead[11] = 0, 0                               // clear checksum field
        binary.BigEndian.PutUint16(pktHead[2:], uint16(coalescedLen)) // set new total length
        iphCSum := ^checksum(pktHead[:item.iphLen], 0)                // compute checksum
        binary.BigEndian.PutUint16(pktHead[10:], iphCSum)             // set checksum field
    }
    hdr.encode(bufs[item.bufsIndex][bufsOffset-virtioNetHdrLen:])

    // Calculate the pseudo header checksum and place it at the TCP checksum
    // offset. Downstream checksum offloading will combine this with computation
    // of the tcp header and payload checksum.
    addrLen := 4
    addrOffset := ipv4SrcAddrOffset
    if isV6 {
        addrLen = 16
        addrOffset = ipv6SrcAddrOffset
    }
    srcAddrAt := bufsOffset + addrOffset
    srcAddr := bufs[item.bufsIndex][srcAddrAt : srcAddrAt+addrLen]
    dstAddr := bufs[item.bufsIndex][srcAddrAt+addrLen : srcAddrAt+addrLen*2]
    psum := pseudoHeaderChecksumNoFold(unix.IPPROTO_TCP, srcAddr, dstAddr, uint16(coalescedLen-int(item.iphLen)))
    binary.BigEndian.PutUint16(pktHead[hdr.csumStart+hdr.csumOffset:], checksum([]byte{}, psum))

    item.numMerged++
    return coalesceSuccess
}

const (
    ipv4FlagMoreFragments uint8 = 0x20
)

const (
    ipv4SrcAddrOffset = 12
    ipv6SrcAddrOffset = 8
    maxUint16         = 1<<16 - 1
)

// tcpGRO evaluates the TCP packet at pktI in bufs for coalescing with
// existing packets tracked in table. It will return false when pktI is not
// coalesced, otherwise true. This indicates to the caller if bufs[pktI]
// should be written to the Device.
func tcpGRO(bufs [][]byte, offset int, pktI int, table *tcpGROTable, isV6 bool) (pktCoalesced bool) {
    pkt := bufs[pktI][offset:]
    if len(pkt) > maxUint16 {
        // A valid IPv4 or IPv6 packet will never exceed this.
        return false
    }
    iphLen := int((pkt[0] & 0x0F) * 4)
    if isV6 {
        iphLen = 40
        ipv6HPayloadLen := int(binary.BigEndian.Uint16(pkt[4:]))
        if ipv6HPayloadLen != len(pkt)-iphLen {
            return false
        }
    } else {
        totalLen := int(binary.BigEndian.Uint16(pkt[2:]))
        if totalLen != len(pkt) {
            return false
        }
    }
    if len(pkt) < iphLen {
        return false
    }
    tcphLen := int((pkt[iphLen+12] >> 4) * 4)
    if tcphLen < 20 || tcphLen > 60 {
        return false
    }
    if len(pkt) < iphLen+tcphLen {
        return false
    }
    if !isV6 {
        if pkt[6]&ipv4FlagMoreFragments != 0 || pkt[6]<<3 != 0 || pkt[7] != 0 {
            // no GRO support for fragmented segments for now
            return false
        }
    }
    tcpFlags := pkt[iphLen+tcpFlagsOffset]
    var pshSet bool
    // not a candidate if any non-ACK flags (except PSH+ACK) are set
    if tcpFlags != tcpFlagACK {
        if pkt[iphLen+tcpFlagsOffset] != tcpFlagACK|tcpFlagPSH {
            return false
        }
        pshSet = true
    }
    gsoSize := uint16(len(pkt) - tcphLen - iphLen)
    // not a candidate if payload len is 0
    if gsoSize < 1 {
        return false
    }
    seq := binary.BigEndian.Uint32(pkt[iphLen+4:])
    srcAddrOffset := ipv4SrcAddrOffset
    addrLen := 4
    if isV6 {
        srcAddrOffset = ipv6SrcAddrOffset
        addrLen = 16
    }
    items, existing := table.lookupOrInsert(pkt, srcAddrOffset, srcAddrOffset+addrLen, iphLen, tcphLen, pktI)
    if !existing {
        return false
    }
    for i := len(items) - 1; i >= 0; i-- {
        // In the best case of packets arriving in order iterating in reverse is
        // more efficient if there are multiple items for a given flow. This
        // also enables a natural table.deleteAt() in the
        // coalesceItemInvalidCSum case without the need for index tracking.
        // This algorithm makes a best effort to coalesce in the event of
        // unordered packets, where pkt may land anywhere in items from a
        // sequence number perspective, however once an item is inserted into
        // the table it is never compared across other items later.
        item := items[i]
        can := tcpPacketsCanCoalesce(pkt, uint8(iphLen), uint8(tcphLen), seq, pshSet, gsoSize, item, bufs, offset)
        if can != coalesceUnavailable {
            result := coalesceTCPPackets(can, pkt, pktI, gsoSize, seq, pshSet, &item, bufs, offset, isV6)
            switch result {
            case coalesceSuccess:
                table.updateAt(item, i)
                return true
            case coalesceItemInvalidCSum:
                // delete the item with an invalid csum
                table.deleteAt(item.key, i)
            case coalescePktInvalidCSum:
                // no point in inserting an item that we can't coalesce
                return false
            default:
            }
        }
    }
    // failed to coalesce with any other packets; store the item in the flow
    table.insert(pkt, srcAddrOffset, srcAddrOffset+addrLen, iphLen, tcphLen, pktI)
    return false
}

func isTCP4NoIPOptions(b []byte) bool {
    if len(b) < 40 {
        return false
    }
    if b[0]>>4 != 4 {
        return false
    }
    if b[0]&0x0F != 5 {
        return false
    }
    if b[9] != unix.IPPROTO_TCP {
        return false
    }
    return true
}

func isTCP6NoEH(b []byte) bool {
    if len(b) < 60 {
        return false
    }
    if b[0]>>4 != 6 {
        return false
    }
    if b[6] != unix.IPPROTO_TCP {
        return false
    }
    return true
}

// handleGRO evaluates bufs for GRO, and writes the indices of the resulting
// packets into toWrite. toWrite, tcp4Table, and tcp6Table should initially be
// empty (but non-nil), and are passed in to save allocs as the caller may reset
// and recycle them across vectors of packets.
func handleGRO(bufs [][]byte, offset int, tcp4Table, tcp6Table *tcpGROTable, toWrite *[]int) error {
    for i := range bufs {
        if offset < virtioNetHdrLen || offset > len(bufs[i])-1 {
            return errors.New("invalid offset")
        }
        var coalesced bool
        switch {
        case isTCP4NoIPOptions(bufs[i][offset:]): // ipv4 packets w/IP options do not coalesce
            coalesced = tcpGRO(bufs, offset, i, tcp4Table, false)
        case isTCP6NoEH(bufs[i][offset:]): // ipv6 packets w/extension headers do not coalesce
            coalesced = tcpGRO(bufs, offset, i, tcp6Table, true)
        }
        if !coalesced {
            hdr := virtioNetHdr{}
            err := hdr.encode(bufs[i][offset-virtioNetHdrLen:])
            if err != nil {
                return err
            }
            *toWrite = append(*toWrite, i)
        }
    }
    return nil
}

// tcpTSO splits packets from in into outBuffs, writing the size of each
// element into sizes. It returns the number of buffers populated, and/or an
// error.
func tcpTSO(in []byte, hdr virtioNetHdr, outBuffs [][]byte, sizes []int, outOffset int) (int, error) {
    iphLen := int(hdr.csumStart)
    srcAddrOffset := ipv6SrcAddrOffset
    addrLen := 16
    if hdr.gsoType == unix.VIRTIO_NET_HDR_GSO_TCPV4 {
        in[10], in[11] = 0, 0 // clear ipv4 header checksum
        srcAddrOffset = ipv4SrcAddrOffset
        addrLen = 4
    }
    tcpCSumAt := int(hdr.csumStart + hdr.csumOffset)
    in[tcpCSumAt], in[tcpCSumAt+1] = 0, 0 // clear tcp checksum
    firstTCPSeqNum := binary.BigEndian.Uint32(in[hdr.csumStart+4:])
    nextSegmentDataAt := int(hdr.hdrLen)
    i := 0
    for ; nextSegmentDataAt < len(in); i++ {
        if i == len(outBuffs) {
            return i - 1, ErrTooManySegments
        }
        nextSegmentEnd := nextSegmentDataAt + int(hdr.gsoSize)
        if nextSegmentEnd > len(in) {
            nextSegmentEnd = len(in)
        }
        segmentDataLen := nextSegmentEnd - nextSegmentDataAt
        totalLen := int(hdr.hdrLen) + segmentDataLen
        sizes[i] = totalLen
        out := outBuffs[i][outOffset:]

        copy(out, in[:iphLen])
        if hdr.gsoType == unix.VIRTIO_NET_HDR_GSO_TCPV4 {
            // For IPv4 we are responsible for incrementing the ID field,
            // updating the total len field, and recalculating the header
            // checksum.
            if i > 0 {
                id := binary.BigEndian.Uint16(out[4:])
                id += uint16(i)
                binary.BigEndian.PutUint16(out[4:], id)
            }
            binary.BigEndian.PutUint16(out[2:], uint16(totalLen))
            ipv4CSum := ^checksum(out[:iphLen], 0)
            binary.BigEndian.PutUint16(out[10:], ipv4CSum)
        } else {
            // For IPv6 we are responsible for updating the payload length field.
            binary.BigEndian.PutUint16(out[4:], uint16(totalLen-iphLen))
        }

        // TCP header
        copy(out[hdr.csumStart:hdr.hdrLen], in[hdr.csumStart:hdr.hdrLen])
        tcpSeq := firstTCPSeqNum + uint32(hdr.gsoSize*uint16(i))
        binary.BigEndian.PutUint32(out[hdr.csumStart+4:], tcpSeq)
        if nextSegmentEnd != len(in) {
            // FIN and PSH should only be set on last segment
            clearFlags := tcpFlagFIN | tcpFlagPSH
            out[hdr.csumStart+tcpFlagsOffset] &^= clearFlags
        }

        // payload
        copy(out[hdr.hdrLen:], in[nextSegmentDataAt:nextSegmentEnd])

        // TCP checksum
        tcpHLen := int(hdr.hdrLen - hdr.csumStart)
        tcpLenForPseudo := uint16(tcpHLen + segmentDataLen)
        tcpCSumNoFold := pseudoHeaderChecksumNoFold(unix.IPPROTO_TCP, in[srcAddrOffset:srcAddrOffset+addrLen], in[srcAddrOffset+addrLen:srcAddrOffset+addrLen*2], tcpLenForPseudo)
        tcpCSum := ^checksum(out[hdr.csumStart:totalLen], tcpCSumNoFold)
        binary.BigEndian.PutUint16(out[hdr.csumStart+hdr.csumOffset:], tcpCSum)

        nextSegmentDataAt += int(hdr.gsoSize)
    }
    return i, nil
}

func gsoNoneChecksum(in []byte, cSumStart, cSumOffset uint16) error {
    cSumAt := cSumStart + cSumOffset
    // The initial value at the checksum offset should be summed with the
    // checksum we compute. This is typically the pseudo-header checksum.
    initial := binary.BigEndian.Uint16(in[cSumAt:])
    in[cSumAt], in[cSumAt+1] = 0, 0
    binary.BigEndian.PutUint16(in[cSumAt:], ^checksum(in[cSumStart:], uint64(initial)))
    return nil
}
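tcpTSO above walks the superpacket payload in gsoSize steps, prepending a copy of the headers to each slice and patching lengths, IPv4 IDs, sequence numbers, and checksums per segment. The following is a tiny standalone sketch of just the splitting arithmetic; the hdrLen, gsoSize, and sequence values are arbitrary example numbers, not taken from the diff.

```go
package main

import "fmt"

func main() {
    const (
        hdrLen  = 40   // example: 20-byte IPv4 + 20-byte TCP header
        gsoSize = 1448 // example MSS-sized segment payload
        total   = hdrLen + 4000
    )
    // Mirrors the loop bounds in tcpTSO: each output packet carries the
    // headers plus up to gsoSize bytes of payload; the last one is shorter.
    seq := uint32(1000) // example starting TCP sequence number
    for next, i := hdrLen, 0; next < total; i++ {
        end := next + gsoSize
        if end > total {
            end = total
        }
        fmt.Printf("segment %d: len=%d seq=%d\n", i, hdrLen+(end-next), seq+uint32(i*gsoSize))
        next = end
    }
    // Prints 1488-, 1488- and 1144-byte segments with sequence numbers 1000, 2448, 3896.
}
```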
52
wgstack/tun/tun.go
Normal file
@@ -0,0 +1,52 @@
// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package tun

import (
    "os"
)

type Event int

const (
    EventUp = 1 << iota
    EventDown
    EventMTUUpdate
)

type Device interface {
    // File returns the file descriptor of the device.
    File() *os.File

    // Read one or more packets from the Device (without any additional headers).
    // On a successful read it returns the number of packets read, and sets
    // packet lengths within the sizes slice. len(sizes) must be >= len(bufs).
    // A nonzero offset can be used to instruct the Device on where to begin
    // reading into each element of the bufs slice.
    Read(bufs [][]byte, sizes []int, offset int) (n int, err error)

    // Write one or more packets to the device (without any additional headers).
    // On a successful write it returns the number of packets written. A nonzero
    // offset can be used to instruct the Device on where to begin writing from
    // each packet contained within the bufs slice.
    Write(bufs [][]byte, offset int) (int, error)

    // MTU returns the MTU of the Device.
    MTU() (int, error)

    // Name returns the current name of the Device.
    Name() (string, error)

    // Events returns a channel of type Event, which is fed Device events.
    Events() <-chan Event

    // Close stops the Device and closes the Event channel.
    Close() error

    // BatchSize returns the preferred/max number of packets that can be read or
    // written in a single read/write call. BatchSize must not change over the
    // lifetime of a Device.
    BatchSize() int
}
664
wgstack/tun/tun_linux.go
Normal file
@@ -0,0 +1,664 @@
//go:build linux

// SPDX-License-Identifier: MIT
//
// Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.

package tun

/* Implementation of the TUN device interface for linux
 */

import (
    "errors"
    "fmt"
    "os"
    "sync"
    "syscall"
    "time"
    "unsafe"

    wgconn "github.com/slackhq/nebula/wgstack/conn"
    "golang.org/x/sys/unix"
    "golang.zx2c4.com/wireguard/rwcancel"
)

const (
    cloneDevicePath = "/dev/net/tun"
    ifReqSize       = unix.IFNAMSIZ + 64
)

type NativeTun struct {
    tunFile                 *os.File
    index                   int32      // if index
    errors                  chan error // async error handling
    events                  chan Event // device related events
    netlinkSock             int
    netlinkCancel           *rwcancel.RWCancel
    hackListenerClosed      sync.Mutex
    statusListenersShutdown chan struct{}
    batchSize               int
    vnetHdr                 bool

    closeOnce sync.Once

    nameOnce  sync.Once // guards calling initNameCache, which sets following fields
    nameCache string    // name of interface
    nameErr   error

    readOpMu sync.Mutex                    // readOpMu guards readBuff
    readBuff [virtioNetHdrLen + 65535]byte // if vnetHdr every read() is prefixed by virtioNetHdr

    writeOpMu                  sync.Mutex // writeOpMu guards toWrite, tcp4GROTable, tcp6GROTable
    toWrite                    []int
    tcp4GROTable, tcp6GROTable *tcpGROTable
}

func (tun *NativeTun) File() *os.File {
    return tun.tunFile
}

func (tun *NativeTun) routineHackListener() {
    defer tun.hackListenerClosed.Unlock()
    /* This is needed for the detection to work across network namespaces
     * If you are reading this and know a better method, please get in touch.
     */
    last := 0
    const (
        up   = 1
        down = 2
    )
    for {
        sysconn, err := tun.tunFile.SyscallConn()
        if err != nil {
            return
        }
        err2 := sysconn.Control(func(fd uintptr) {
            _, err = unix.Write(int(fd), nil)
        })
        if err2 != nil {
            return
        }
        switch err {
        case unix.EINVAL:
            if last != up {
                // If the tunnel is up, it reports that write() is
                // allowed but we provided invalid data.
                tun.events <- EventUp
                last = up
            }
        case unix.EIO:
            if last != down {
                // If the tunnel is down, it reports that no I/O
                // is possible, without checking our provided data.
                tun.events <- EventDown
                last = down
            }
        default:
            return
        }
        select {
        case <-time.After(time.Second):
            // nothing
        case <-tun.statusListenersShutdown:
            return
        }
    }
}

func createNetlinkSocket() (int, error) {
    sock, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, unix.NETLINK_ROUTE)
    if err != nil {
        return -1, err
    }
    saddr := &unix.SockaddrNetlink{
        Family: unix.AF_NETLINK,
        Groups: unix.RTMGRP_LINK | unix.RTMGRP_IPV4_IFADDR | unix.RTMGRP_IPV6_IFADDR,
    }
    err = unix.Bind(sock, saddr)
    if err != nil {
        return -1, err
    }
    return sock, nil
}

func (tun *NativeTun) routineNetlinkListener() {
    defer func() {
        unix.Close(tun.netlinkSock)
        tun.hackListenerClosed.Lock()
        close(tun.events)
        tun.netlinkCancel.Close()
    }()

    for msg := make([]byte, 1<<16); ; {
        var err error
        var msgn int
        for {
            msgn, _, _, _, err = unix.Recvmsg(tun.netlinkSock, msg[:], nil, 0)
            if err == nil || !rwcancel.RetryAfterError(err) {
                break
            }
            if !tun.netlinkCancel.ReadyRead() {
                tun.errors <- fmt.Errorf("netlink socket closed: %w", err)
                return
            }
        }
        if err != nil {
            tun.errors <- fmt.Errorf("failed to receive netlink message: %w", err)
            return
        }

        select {
        case <-tun.statusListenersShutdown:
            return
        default:
        }

        wasEverUp := false
        for remain := msg[:msgn]; len(remain) >= unix.SizeofNlMsghdr; {

            hdr := *(*unix.NlMsghdr)(unsafe.Pointer(&remain[0]))

            if int(hdr.Len) > len(remain) {
                break
            }

            switch hdr.Type {
            case unix.NLMSG_DONE:
                remain = []byte{}

            case unix.RTM_NEWLINK:
                info := *(*unix.IfInfomsg)(unsafe.Pointer(&remain[unix.SizeofNlMsghdr]))
                remain = remain[hdr.Len:]

                if info.Index != tun.index {
                    // not our interface
                    continue
                }

                if info.Flags&unix.IFF_RUNNING != 0 {
                    tun.events <- EventUp
                    wasEverUp = true
                }

                if info.Flags&unix.IFF_RUNNING == 0 {
                    // Don't emit EventDown before we've ever emitted EventUp.
                    // This avoids a startup race with HackListener, which
                    // might detect Up before we have finished reporting Down.
                    if wasEverUp {
                        tun.events <- EventDown
                    }
                }

                tun.events <- EventMTUUpdate

            default:
                remain = remain[hdr.Len:]
            }
        }
    }
}

func getIFIndex(name string) (int32, error) {
    fd, err := unix.Socket(
        unix.AF_INET,
        unix.SOCK_DGRAM|unix.SOCK_CLOEXEC,
        0,
    )
    if err != nil {
        return 0, err
    }

    defer unix.Close(fd)

    var ifr [ifReqSize]byte
    copy(ifr[:], name)
    _, _, errno := unix.Syscall(
        unix.SYS_IOCTL,
        uintptr(fd),
        uintptr(unix.SIOCGIFINDEX),
        uintptr(unsafe.Pointer(&ifr[0])),
    )

    if errno != 0 {
        return 0, errno
    }

    return *(*int32)(unsafe.Pointer(&ifr[unix.IFNAMSIZ])), nil
}

func (tun *NativeTun) setMTU(n int) error {
    name, err := tun.Name()
    if err != nil {
        return err
    }

    // open datagram socket
    fd, err := unix.Socket(
        unix.AF_INET,
        unix.SOCK_DGRAM|unix.SOCK_CLOEXEC,
        0,
    )
    if err != nil {
        return err
    }
    defer unix.Close(fd)

    var ifr [ifReqSize]byte
    copy(ifr[:], name)
    *(*uint32)(unsafe.Pointer(&ifr[unix.IFNAMSIZ])) = uint32(n)

    _, _, errno := unix.Syscall(
        unix.SYS_IOCTL,
        uintptr(fd),
        uintptr(unix.SIOCSIFMTU),
        uintptr(unsafe.Pointer(&ifr[0])),
    )

    if errno != 0 {
        return errno
    }
    return nil
}

func (tun *NativeTun) routineNetlinkRead() {
    defer func() {
        unix.Close(tun.netlinkSock)
        tun.hackListenerClosed.Lock()
        close(tun.events)
        tun.netlinkCancel.Close()
    }()

    for msg := make([]byte, 1<<16); ; {
        var err error
        var msgn int
        for {
            msgn, _, _, _, err = unix.Recvmsg(tun.netlinkSock, msg[:], nil, 0)
            if err == nil || !rwcancel.RetryAfterError(err) {
                break
            }
            if !tun.netlinkCancel.ReadyRead() {
                tun.errors <- fmt.Errorf("netlink socket closed: %w", err)
                return
            }
        }
        if err != nil {
            tun.errors <- fmt.Errorf("failed to receive netlink message: %w", err)
            return
        }

        wasEverUp := false
        for remain := msg[:msgn]; len(remain) >= unix.SizeofNlMsghdr; {

            hdr := *(*unix.NlMsghdr)(unsafe.Pointer(&remain[0]))

            if int(hdr.Len) > len(remain) {
                break
            }

            switch hdr.Type {
            case unix.NLMSG_DONE:
                remain = []byte{}

            case unix.RTM_NEWLINK:
                info := *(*unix.IfInfomsg)(unsafe.Pointer(&remain[unix.SizeofNlMsghdr]))
                remain = remain[hdr.Len:]

                if info.Index != tun.index {
                    continue
                }

                if info.Flags&unix.IFF_RUNNING != 0 {
                    tun.events <- EventUp
                    wasEverUp = true
                }

                if info.Flags&unix.IFF_RUNNING == 0 {
                    if wasEverUp {
                        tun.events <- EventDown
                    }
                }
                tun.events <- EventMTUUpdate

            default:
                remain = remain[hdr.Len:]
            }
        }
    }
}

func (tun *NativeTun) routineNetlink() {
    var err error

    tun.netlinkSock, err = createNetlinkSocket()
    if err != nil {
        tun.errors <- fmt.Errorf("failed to create netlink socket: %w", err)
        return
    }

    tun.netlinkCancel, err = rwcancel.NewRWCancel(tun.netlinkSock)
    if err != nil {
        tun.errors <- fmt.Errorf("failed to create netlink cancel: %w", err)
        return
    }

    go tun.routineNetlinkListener()
}

func (tun *NativeTun) Close() error {
    var err1, err2 error
    tun.closeOnce.Do(func() {
        if tun.statusListenersShutdown != nil {
            close(tun.statusListenersShutdown)
            if tun.netlinkCancel != nil {
                err1 = tun.netlinkCancel.Cancel()
            }
        } else if tun.events != nil {
            close(tun.events)
        }
        err2 = tun.tunFile.Close()
    })
    if err1 != nil {
        return err1
    }
    return err2
}

func (tun *NativeTun) BatchSize() int {
    return tun.batchSize
}

const (
    // TODO: support TSO with ECN bits
    tunOffloads = unix.TUN_F_CSUM | unix.TUN_F_TSO4 | unix.TUN_F_TSO6
)

func (tun *NativeTun) initFromFlags(name string) error {
    sc, err := tun.tunFile.SyscallConn()
    if err != nil {
        return err
    }
    if e := sc.Control(func(fd uintptr) {
        var (
            ifr *unix.Ifreq
        )
        ifr, err = unix.NewIfreq(name)
        if err != nil {
            return
        }
        err = unix.IoctlIfreq(int(fd), unix.TUNGETIFF, ifr)
        if err != nil {
            return
        }
        got := ifr.Uint16()
        if got&unix.IFF_VNET_HDR != 0 {
            err = unix.IoctlSetInt(int(fd), unix.TUNSETOFFLOAD, tunOffloads)
            if err != nil {
                return
            }
            tun.vnetHdr = true
            tun.batchSize = wgconn.IdealBatchSize
        } else {
            tun.batchSize = 1
        }
    }); e != nil {
        return e
    }
    return err
}

// CreateTUN creates a Device with the provided name and MTU.
func CreateTUN(name string, mtu int) (Device, error) {
    nfd, err := unix.Open(cloneDevicePath, unix.O_RDWR|unix.O_CLOEXEC, 0)
    if err != nil {
        return nil, fmt.Errorf("CreateTUN(%q) failed; %s does not exist", name, cloneDevicePath)
    }
    fd := os.NewFile(uintptr(nfd), cloneDevicePath)
    tun, err := CreateTUNFromFile(fd, mtu)
    if err != nil {
        return nil, err
    }
    if name != "tun" {
        if err := tun.(*NativeTun).initFromFlags(name); err != nil {
            tun.Close()
            return nil, fmt.Errorf("CreateTUN(%q) failed to set flags: %w", name, err)
        }
    }
    return tun, nil
}

// CreateTUNFromFile creates a Device from an os.File with the provided MTU.
func CreateTUNFromFile(file *os.File, mtu int) (Device, error) {
    tun := &NativeTun{
        tunFile: file,
        errors:  make(chan error, 5),
        events:  make(chan Event, 5),
    }

    name, err := tun.Name()
    if err != nil {
        return nil, fmt.Errorf("failed to determine TUN name: %w", err)
    }

    if err := tun.initFromFlags(name); err != nil {
        return nil, fmt.Errorf("failed to query TUN flags: %w", err)
    }

    if tun.batchSize == 0 {
        tun.batchSize = 1
    }

    tun.index, err = getIFIndex(name)
    if err != nil {
        return nil, fmt.Errorf("failed to get TUN index: %w", err)
    }

    if err = tun.setMTU(mtu); err != nil {
        return nil, fmt.Errorf("failed to set MTU: %w", err)
    }

    tun.statusListenersShutdown = make(chan struct{})
    go tun.routineNetlink()

    if tun.batchSize == 0 {
        tun.batchSize = 1
    }

    tun.tcp4GROTable = newTCPGROTable()
    tun.tcp6GROTable = newTCPGROTable()

    return tun, nil
}

func (tun *NativeTun) Name() (string, error) {
    tun.nameOnce.Do(tun.initNameCache)
    return tun.nameCache, tun.nameErr
}

func (tun *NativeTun) initNameCache() {
    sysconn, err := tun.tunFile.SyscallConn()
    if err != nil {
        tun.nameErr = err
        return
    }
    err = sysconn.Control(func(fd uintptr) {
        var ifr [ifReqSize]byte
        _, _, errno := unix.Syscall(
            unix.SYS_IOCTL,
            fd,
            uintptr(unix.TUNGETIFF),
            uintptr(unsafe.Pointer(&ifr[0])),
        )
        if errno != 0 {
            tun.nameErr = errno
            return
        }
        tun.nameCache = unix.ByteSliceToString(ifr[:])
    })
    if err != nil && tun.nameErr == nil {
        tun.nameErr = err
    }
}

func (tun *NativeTun) MTU() (int, error) {
    name, err := tun.Name()
    if err != nil {
        return 0, err
    }

    // open datagram socket
    fd, err := unix.Socket(
        unix.AF_INET,
        unix.SOCK_DGRAM|unix.SOCK_CLOEXEC,
        0,
    )
    if err != nil {
        return 0, err
    }
    defer unix.Close(fd)

    var ifr [ifReqSize]byte
    copy(ifr[:], name)

    _, _, errno := unix.Syscall(
        unix.SYS_IOCTL,
        uintptr(fd),
        uintptr(unix.SIOCGIFMTU),
        uintptr(unsafe.Pointer(&ifr[0])),
    )

    if errno != 0 {
        return 0, errno
    }

    return int(*(*uint32)(unsafe.Pointer(&ifr[unix.IFNAMSIZ]))), nil
}

func (tun *NativeTun) Events() <-chan Event {
    return tun.events
}

func (tun *NativeTun) Write(bufs [][]byte, offset int) (int, error) {
    tun.writeOpMu.Lock()
    defer func() {
        tun.tcp4GROTable.reset()
        tun.tcp6GROTable.reset()
        tun.writeOpMu.Unlock()
    }()
    var (
        errs  error
        total int
    )
    tun.toWrite = tun.toWrite[:0]
    if tun.vnetHdr {
        err := handleGRO(bufs, offset, tun.tcp4GROTable, tun.tcp6GROTable, &tun.toWrite)
        if err != nil {
            return 0, err
        }
        offset -= virtioNetHdrLen
    } else {
        for i := range bufs {
            tun.toWrite = append(tun.toWrite, i)
        }
    }
    for _, bufsI := range tun.toWrite {
        n, err := tun.tunFile.Write(bufs[bufsI][offset:])
        if errors.Is(err, syscall.EBADFD) {
            return total, os.ErrClosed
        }
        if err != nil {
            errs = errors.Join(errs, err)
        } else {
            total += n
        }
    }
    return total, errs
}

// handleVirtioRead splits in into bufs, leaving offset bytes at the front of
// each buffer. It mutates sizes to reflect the size of each element of bufs,
// and returns the number of packets read.
func handleVirtioRead(in []byte, bufs [][]byte, sizes []int, offset int) (int, error) {
    var hdr virtioNetHdr
    if err := hdr.decode(in); err != nil {
        return 0, err
    }
    in = in[virtioNetHdrLen:]
    if hdr.gsoType == unix.VIRTIO_NET_HDR_GSO_NONE {
        if hdr.flags&unix.VIRTIO_NET_HDR_F_NEEDS_CSUM != 0 {
            if err := gsoNoneChecksum(in, hdr.csumStart, hdr.csumOffset); err != nil {
                return 0, err
            }
        }
        if len(in) > len(bufs[0][offset:]) {
            return 0, fmt.Errorf("read len %d overflows bufs element len %d", len(in), len(bufs[0][offset:]))
        }
        n := copy(bufs[0][offset:], in)
        sizes[0] = n
        return 1, nil
    }
    if hdr.gsoType != unix.VIRTIO_NET_HDR_GSO_TCPV4 && hdr.gsoType != unix.VIRTIO_NET_HDR_GSO_TCPV6 {
        return 0, fmt.Errorf("unsupported virtio GSO type: %d", hdr.gsoType)
    }

    ipVersion := in[0] >> 4
    switch ipVersion {
    case 4:
        if hdr.gsoType != unix.VIRTIO_NET_HDR_GSO_TCPV4 {
            return 0, fmt.Errorf("ip header version: %d, GSO type: %d", ipVersion, hdr.gsoType)
        }
    case 6:
        if hdr.gsoType != unix.VIRTIO_NET_HDR_GSO_TCPV6 {
            return 0, fmt.Errorf("ip header version: %d, GSO type: %d", ipVersion, hdr.gsoType)
        }
    default:
        return 0, fmt.Errorf("invalid ip header version: %d", ipVersion)
    }

    if len(in) <= int(hdr.csumStart+12) {
        return 0, errors.New("packet is too short")
    }
    tcpHLen := uint16(in[hdr.csumStart+12] >> 4 * 4)
    if tcpHLen < 20 || tcpHLen > 60 {
        return 0, fmt.Errorf("tcp header len is invalid: %d", tcpHLen)
    }
    hdr.hdrLen = hdr.csumStart + tcpHLen
    if len(in) < int(hdr.hdrLen) {
        return 0, fmt.Errorf("length of packet (%d) < virtioNetHdr.hdrLen (%d)", len(in), hdr.hdrLen)
    }
    if hdr.hdrLen < hdr.csumStart {
        return 0, fmt.Errorf("virtioNetHdr.hdrLen (%d) < virtioNetHdr.csumStart (%d)", hdr.hdrLen, hdr.csumStart)
    }
    cSumAt := int(hdr.csumStart + hdr.csumOffset)
    if cSumAt+1 >= len(in) {
        return 0, fmt.Errorf("end of checksum offset (%d) exceeds packet length (%d)", cSumAt+1, len(in))
    }

    return tcpTSO(in, hdr, bufs, sizes, offset)
}

func (tun *NativeTun) Read(bufs [][]byte, sizes []int, offset int) (int, error) {
    tun.readOpMu.Lock()
    defer tun.readOpMu.Unlock()
    select {
    case err := <-tun.errors:
        return 0, err
    default:
        readInto := bufs[0][offset:]
        if tun.vnetHdr {
            readInto = tun.readBuff[:]
        }
        n, err := tun.tunFile.Read(readInto)
        if errors.Is(err, syscall.EBADFD) {
            err = os.ErrClosed
        }
        if err != nil {
            return 0, err
        }
        if tun.vnetHdr {
            return handleVirtioRead(readInto[:n], bufs, sizes, offset)
        }
        sizes[0] = n
        return 1, nil
    }
}
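Taken together, CreateTUN plus the batched Device interface are meant to be driven in a loop sized by BatchSize. Below is a hypothetical usage sketch (not part of the diff), assuming the import path implied by the diff's own imports; the interface name, MTU, and buffer sizes are example values, and writes through the GRO path would additionally need VirtioNetHdrLen bytes of headroom in front of each packet.

```go
package main

import (
    "fmt"
    "log"

    "github.com/slackhq/nebula/wgstack/tun"
)

func main() {
    // Example interface name and MTU; both are placeholders.
    dev, err := tun.CreateTUN("nebula1", 1300)
    if err != nil {
        log.Fatal(err)
    }
    defer dev.Close()

    batch := dev.BatchSize()
    bufs := make([][]byte, batch)
    sizes := make([]int, batch)
    for i := range bufs {
        // Oversized buffers: handleVirtioRead may expand a single vnet-hdr
        // read into several near-64KB segments across these elements.
        bufs[i] = make([]byte, tun.VirtioNetHdrLen+65535)
    }

    for {
        // Reads can use offset 0; a Write call via the GRO path must instead
        // place packet data at offset >= tun.VirtioNetHdrLen.
        n, err := dev.Read(bufs, sizes, 0)
        if err != nil {
            log.Fatal(err)
        }
        for i := 0; i < n; i++ {
            fmt.Printf("packet %d: %d bytes\n", i, sizes[i])
        }
    }
}
```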