Merge remote-tracking branch 'origin/master' into multiport

Wade Simmons 2023-03-13 15:07:32 -04:00
commit aec7f5f865
63 changed files with 1618 additions and 945 deletions

.github/ISSUE_TEMPLATE/config.yml
@@ -1,9 +1,13 @@
 blank_issues_enabled: true
 contact_links:
   - name: 📘 Documentation
-    url: https://www.defined.net/nebula/
+    url: https://nebula.defined.net/docs/
     about: Review documentation.
   - name: 💁 Support/Chat
     url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
     about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'
+  - name: 📱 Mobile Nebula
+    url: https://github.com/definednet/mobile_nebula
+    about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'

.github/workflows/gofmt.yml
@@ -14,10 +14,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
        id: go
      - name: Check out code into the Go module directory
@@ -26,9 +26,9 @@ jobs:
      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-gofmt1.18-${{ hashFiles('**/go.sum') }}
+          key: ${{ runner.os }}-gofmt1.19-${{ hashFiles('**/go.sum') }}
          restore-keys: |
-            ${{ runner.os }}-gofmt1.18-
+            ${{ runner.os }}-gofmt1.19-
      - name: Install goimports
        run: |

.github/workflows/release.yml
@@ -10,10 +10,10 @@ jobs:
     name: Build Linux All
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
      - name: Checkout code
        uses: actions/checkout@v2
@@ -34,10 +34,10 @@ jobs:
     name: Build Windows
     runs-on: windows-latest
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
      - name: Checkout code
        uses: actions/checkout@v2
@@ -68,10 +68,10 @@ jobs:
       HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
     runs-on: macos-11
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
      - name: Checkout code
        uses: actions/checkout@v2

.github/workflows/smoke.yml
@@ -18,10 +18,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
        id: go
      - name: Check out code into the Go module directory
@@ -30,9 +30,9 @@ jobs:
      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
+          key: ${{ runner.os }}-go1.19-${{ hashFiles('**/go.sum') }}
          restore-keys: |
-            ${{ runner.os }}-go1.18-
+            ${{ runner.os }}-go1.19-
      - name: build
        run: make bin-docker

.github/workflows/test.yml
@@ -18,10 +18,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
        id: go
      - name: Check out code into the Go module directory
@@ -30,9 +30,9 @@ jobs:
      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
+          key: ${{ runner.os }}-go1.19-${{ hashFiles('**/go.sum') }}
          restore-keys: |
-            ${{ runner.os }}-go1.18-
+            ${{ runner.os }}-go1.19-
      - name: Build
        run: make all
@@ -57,10 +57,10 @@ jobs:
         os: [windows-latest, macos-11]
     steps:
-      - name: Set up Go 1.18
+      - name: Set up Go 1.19
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: 1.19
        id: go
      - name: Check out code into the Go module directory
@@ -69,9 +69,9 @@ jobs:
      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
+          key: ${{ runner.os }}-go1.19-${{ hashFiles('**/go.sum') }}
          restore-keys: |
-            ${{ runner.os }}-go1.18-
+            ${{ runner.os }}-go1.19-
      - name: Build nebula
        run: go build ./cmd/nebula

.gitignore
@@ -4,10 +4,14 @@
 /nebula-arm6
 /nebula-darwin
 /nebula.exe
-/cert/*.crt
-/cert/*.key
+/nebula-cert.exe
 /coverage.out
 /cpu.pprof
 /build
 /*.tar.gz
 /e2e/mermaid/
+**.crt
+**.key
+**.pem
+!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key
+!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt

Makefile
@@ -1,4 +1,4 @@
-GOMINVERSION = 1.18
+GOMINVERSION = 1.19
 NEBULA_CMD_PATH = "./cmd/nebula"
 GO111MODULE = on
 export GO111MODULE

README.md
@@ -8,7 +8,7 @@ and tunneling, and each of those individual pieces existed before Nebula in vari
 What makes Nebula different to existing offerings is that it brings all of these ideas together,
 resulting in a sum that is greater than its individual parts.

-Further documentation can be found [here](https://www.defined.net/nebula/).
+Further documentation can be found [here](https://nebula.defined.net/docs/).

 You can read more about Nebula [here](https://medium.com/p/884110a5579).
@@ -31,12 +31,16 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
   ```
   $ sudo pacman -S nebula
   ```
-- [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/)
+- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
   ```
-  $ sudo dnf copr enable jdoss/nebula
   $ sudo dnf install nebula
   ```
+- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
+  ```
+  $ brew install nebula
+  ```

 #### Mobile
 - [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
@@ -93,13 +97,13 @@ Download a copy of the nebula [example configuration](https://github.com/slackhq
 #### 6. Copy nebula credentials, configuration, and binaries to each host

-For each host, copy the nebula binary to the host, along with `config.yaml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.
+For each host, copy the nebula binary to the host, along with `config.yml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4.

 **DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**

 #### 7. Run nebula on each host
 ```
-./nebula -config /path/to/config.yaml
+./nebula -config /path/to/config.yml
 ```

 ## Building Nebula from source

cmd/nebula-cert/main.go
@@ -127,6 +127,8 @@ func help(err string, out io.Writer) {
     fmt.Fprintln(out, " "+signSummary())
     fmt.Fprintln(out, " "+printSummary())
     fmt.Fprintln(out, " "+verifySummary())
+    fmt.Fprintln(out, "")
+    fmt.Fprintf(out, " To see usage for a given mode, use %s <mode> -h\n", os.Args[0])
 }

 func mustFlagString(name string, val *string) error {

cmd/nebula-cert/main_test.go
@@ -22,7 +22,9 @@ func Test_help(t *testing.T) {
         " " + keygenSummary() + "\n" +
         " " + signSummary() + "\n" +
         " " + printSummary() + "\n" +
-        " " + verifySummary() + "\n"
+        " " + verifySummary() + "\n" +
+        "\n" +
+        " To see usage for a given mode, use " + os.Args[0] + " <mode> -h\n"

     ob := &bytes.Buffer{}

cmd/nebula-service/service.go
@@ -49,6 +49,14 @@ func (p *program) Stop(s service.Service) error {
     return nil
 }

+func fileExists(filename string) bool {
+    _, err := os.Stat(filename)
+    if os.IsNotExist(err) {
+        return false
+    }
+    return true
+}
+
 func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
     if *configPath == "" {
         ex, err := os.Executable()
@@ -56,6 +64,9 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
             panic(err)
         }
         *configPath = filepath.Dir(ex) + "/config.yaml"
+        if !fileExists(*configPath) {
+            *configPath = filepath.Dir(ex) + "/config.yml"
+        }
     }

     svcConfig := &service.Config{
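
The fallback above means the service wrapper now accepts either config name next to the binary. A minimal sketch of the resulting lookup order, assuming the same behavior in isolation (the `resolveConfig` helper and `main` are illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveConfig mirrors the logic above: an explicit -config path wins,
// otherwise try config.yaml next to the executable, then config.yml.
func resolveConfig(flagPath string) string {
	if flagPath != "" {
		return flagPath
	}
	ex, err := os.Executable()
	if err != nil {
		panic(err)
	}
	p := filepath.Join(filepath.Dir(ex), "config.yaml")
	if _, err := os.Stat(p); os.IsNotExist(err) {
		p = filepath.Join(filepath.Dir(ex), "config.yml")
	}
	return p
}

func main() {
	fmt.Println(resolveConfig("")) // e.g. <binary dir>/config.yaml or config.yml
}
```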

config/config_test.go
@@ -7,8 +7,11 @@ import (
     "testing"
     "time"

+    "github.com/imdario/mergo"
     "github.com/slackhq/nebula/test"
     "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "gopkg.in/yaml.v2"
 )

 func TestConfig_Load(t *testing.T) {
@@ -147,3 +150,77 @@ func TestConfig_ReloadConfig(t *testing.T) {
     }
 }
+
+// Ensure mergo merges are done the way we expect.
+// This is needed to test for potential regressions, like:
+// - https://github.com/imdario/mergo/issues/187
+func TestConfig_MergoMerge(t *testing.T) {
+    configs := [][]byte{
+        []byte(`
+listen:
+  port: 1234
+`),
+        []byte(`
+firewall:
+  inbound:
+    - port: 443
+      proto: tcp
+      groups:
+        - server
+    - port: 443
+      proto: tcp
+      groups:
+        - webapp
+`),
+        []byte(`
+listen:
+  host: 0.0.0.0
+  port: 4242
+firewall:
+  outbound:
+    - port: any
+      proto: any
+      host: any
+  inbound:
+    - port: any
+      proto: icmp
+      host: any
+`),
+    }
+
+    var m map[any]any
+
+    // merge the same way config.parse() merges
+    for _, b := range configs {
+        var nm map[any]any
+        err := yaml.Unmarshal(b, &nm)
+        require.NoError(t, err)
+
+        // We need to use WithAppendSlice so that firewall rules in separate
+        // files are appended together
+        err = mergo.Merge(&nm, m, mergo.WithAppendSlice)
+        m = nm
+        require.NoError(t, err)
+    }
+
+    t.Logf("Merged Config: %#v", m)
+    mYaml, err := yaml.Marshal(m)
+    require.NoError(t, err)
+    t.Logf("Merged Config as YAML:\n%s", mYaml)
+
+    // If a bug is present, some items might be replaced instead of merged like we expect
+    expected := map[any]any{
+        "firewall": map[any]any{
+            "inbound": []any{
+                map[any]any{"host": "any", "port": "any", "proto": "icmp"},
+                map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
+                map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
+            "outbound": []any{
+                map[any]any{"host": "any", "port": "any", "proto": "any"}}},
+        "listen": map[any]any{
+            "host": "0.0.0.0",
+            "port": 4242,
+        },
+    }
+    assert.Equal(t, expected, m)
+}
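
Note the merge direction in the loop above: each newly parsed file is the destination and the accumulated map is the source, so with mergo's defaults (no `WithOverride`) values already present in later files are kept, while `WithAppendSlice` concatenates slices instead of replacing them. A standalone sketch of that behavior (hypothetical keys, not nebula config):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// dst plays the role of the newly parsed file, src the previously merged state.
	dst := map[string]any{"port": 4242, "rules": []any{"icmp"}}
	src := map[string]any{"port": 1234, "host": "0.0.0.0", "rules": []any{"tcp"}}

	// Without WithOverride, dst's existing "port" survives; "host" is filled in
	// from src; WithAppendSlice appends src's "rules" onto dst's.
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst) // map[host:0.0.0.0 port:4242 rules:[icmp tcp]]
}
```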

connection_manager.go
@@ -7,7 +7,6 @@ import (
     "github.com/sirupsen/logrus"
     "github.com/slackhq/nebula/header"
-    "github.com/slackhq/nebula/iputil"
 )

 // TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
@@ -15,18 +14,16 @@
 type connectionManager struct {
     hostMap *HostMap
-    in      map[iputil.VpnIp]struct{}
+    in      map[uint32]struct{}
     inLock  *sync.RWMutex
-    inCount int
-    out     map[iputil.VpnIp]struct{}
+    out     map[uint32]struct{}
     outLock *sync.RWMutex
-    outCount     int
-    TrafficTimer *SystemTimerWheel
+    TrafficTimer *LockingTimerWheel[uint32]
     intf         *Interface

-    pendingDeletion      map[iputil.VpnIp]int
+    pendingDeletion      map[uint32]int
     pendingDeletionLock  *sync.RWMutex
-    pendingDeletionTimer *SystemTimerWheel
+    pendingDeletionTimer *LockingTimerWheel[uint32]

     checkInterval           int
     pendingDeletionInterval int
@@ -38,17 +35,15 @@
 func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
     nc := &connectionManager{
         hostMap: intf.hostMap,
-        in:      make(map[iputil.VpnIp]struct{}),
+        in:      make(map[uint32]struct{}),
         inLock:  &sync.RWMutex{},
-        inCount: 0,
-        out:     make(map[iputil.VpnIp]struct{}),
+        out:     make(map[uint32]struct{}),
         outLock: &sync.RWMutex{},
-        outCount:     0,
-        TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
+        TrafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, time.Second*60),
         intf:         intf,
-        pendingDeletion:      make(map[iputil.VpnIp]int),
+        pendingDeletion:      make(map[uint32]int),
         pendingDeletionLock:  &sync.RWMutex{},
-        pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
+        pendingDeletionTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, time.Second*60),
         checkInterval:           checkInterval,
         pendingDeletionInterval: pendingDeletionInterval,
         l:                       l,
@@ -57,41 +52,41 @@ func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface
     return nc
 }

-func (n *connectionManager) In(ip iputil.VpnIp) {
+func (n *connectionManager) In(localIndex uint32) {
     n.inLock.RLock()
     // If this already exists, return
-    if _, ok := n.in[ip]; ok {
+    if _, ok := n.in[localIndex]; ok {
         n.inLock.RUnlock()
         return
     }
     n.inLock.RUnlock()

     n.inLock.Lock()
-    n.in[ip] = struct{}{}
+    n.in[localIndex] = struct{}{}
     n.inLock.Unlock()
 }

-func (n *connectionManager) Out(ip iputil.VpnIp) {
+func (n *connectionManager) Out(localIndex uint32) {
     n.outLock.RLock()
     // If this already exists, return
-    if _, ok := n.out[ip]; ok {
+    if _, ok := n.out[localIndex]; ok {
         n.outLock.RUnlock()
         return
     }
     n.outLock.RUnlock()

     n.outLock.Lock()
     // double check since we dropped the lock temporarily
-    if _, ok := n.out[ip]; ok {
+    if _, ok := n.out[localIndex]; ok {
         n.outLock.Unlock()
         return
     }
-    n.out[ip] = struct{}{}
-    n.AddTrafficWatch(ip, n.checkInterval)
+    n.out[localIndex] = struct{}{}
+    n.AddTrafficWatch(localIndex, n.checkInterval)
     n.outLock.Unlock()
 }

-func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool {
+func (n *connectionManager) CheckIn(localIndex uint32) bool {
     n.inLock.RLock()
-    if _, ok := n.in[vpnIp]; ok {
+    if _, ok := n.in[localIndex]; ok {
         n.inLock.RUnlock()
         return true
     }
@@ -99,35 +94,35 @@ func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool {
     return false
 }

-func (n *connectionManager) ClearIP(ip iputil.VpnIp) {
+func (n *connectionManager) ClearLocalIndex(localIndex uint32) {
     n.inLock.Lock()
     n.outLock.Lock()
-    delete(n.in, ip)
-    delete(n.out, ip)
+    delete(n.in, localIndex)
+    delete(n.out, localIndex)
     n.inLock.Unlock()
     n.outLock.Unlock()
 }

-func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) {
+func (n *connectionManager) ClearPendingDeletion(localIndex uint32) {
     n.pendingDeletionLock.Lock()
-    delete(n.pendingDeletion, ip)
+    delete(n.pendingDeletion, localIndex)
     n.pendingDeletionLock.Unlock()
 }

-func (n *connectionManager) AddPendingDeletion(ip iputil.VpnIp) {
+func (n *connectionManager) AddPendingDeletion(localIndex uint32) {
     n.pendingDeletionLock.Lock()
-    if _, ok := n.pendingDeletion[ip]; ok {
-        n.pendingDeletion[ip] += 1
+    if _, ok := n.pendingDeletion[localIndex]; ok {
+        n.pendingDeletion[localIndex] += 1
     } else {
-        n.pendingDeletion[ip] = 0
+        n.pendingDeletion[localIndex] = 0
     }
-    n.pendingDeletionTimer.Add(ip, time.Second*time.Duration(n.pendingDeletionInterval))
+    n.pendingDeletionTimer.Add(localIndex, time.Second*time.Duration(n.pendingDeletionInterval))
     n.pendingDeletionLock.Unlock()
 }

-func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool {
+func (n *connectionManager) checkPendingDeletion(localIndex uint32) bool {
     n.pendingDeletionLock.RLock()
-    if _, ok := n.pendingDeletion[ip]; ok {
+    if _, ok := n.pendingDeletion[localIndex]; ok {
         n.pendingDeletionLock.RUnlock()
         return true
@@ -136,8 +131,8 @@ func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool {
     return false
 }

-func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) {
-    n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds))
+func (n *connectionManager) AddTrafficWatch(localIndex uint32, seconds int) {
+    n.TrafficTimer.Add(localIndex, time.Second*time.Duration(seconds))
 }

 func (n *connectionManager) Start(ctx context.Context) {
@@ -164,40 +159,60 @@ func (n *connectionManager) Run(ctx context.Context) {
 }

 func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) {
-    n.TrafficTimer.advance(now)
+    n.TrafficTimer.Advance(now)
     for {
-        ep := n.TrafficTimer.Purge()
-        if ep == nil {
+        localIndex, has := n.TrafficTimer.Purge()
+        if !has {
             break
         }

-        vpnIp := ep.(iputil.VpnIp)
-
         // Check for traffic coming back in from this host.
-        traf := n.CheckIn(vpnIp)
+        traf := n.CheckIn(localIndex)

-        hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
+        hostinfo, err := n.hostMap.QueryIndex(localIndex)
         if err != nil {
-            n.l.Debugf("Not found in hostmap: %s", vpnIp)
-            n.ClearIP(vpnIp)
-            n.ClearPendingDeletion(vpnIp)
+            n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
+            n.ClearLocalIndex(localIndex)
+            n.ClearPendingDeletion(localIndex)
             continue
         }

-        if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
+        if n.handleInvalidCertificate(now, hostinfo) {
             continue
         }

+        // Does the vpnIp point to this hostinfo or is it ancillary? If we have ancillary hostinfos then we need to
+        // decide if this should be the main hostinfo if we are seeing traffic on it
+        primary, _ := n.hostMap.QueryVpnIp(hostinfo.vpnIp)
+        mainHostInfo := true
+        if primary != nil && primary != hostinfo {
+            mainHostInfo = false
+        }
+
         // If we saw an incoming packets from this ip and peer's certificate is not
         // expired, just ignore.
         if traf {
             if n.l.Level >= logrus.DebugLevel {
-                n.l.WithField("vpnIp", vpnIp).
+                hostinfo.logger(n.l).
                     WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
                     Debug("Tunnel status")
             }
-            n.ClearIP(vpnIp)
-            n.ClearPendingDeletion(vpnIp)
+            n.ClearLocalIndex(localIndex)
+            n.ClearPendingDeletion(localIndex)
+
+            if !mainHostInfo {
+                if hostinfo.vpnIp > n.intf.myVpnIp {
+                    // We are receiving traffic on the non primary hostinfo and we really just want 1 tunnel. Make
+                    // This the primary and prime the old primary hostinfo for testing
+                    n.hostMap.MakePrimary(hostinfo)
+                    n.Out(primary.localIndexId)
+                } else {
+                    // This hostinfo is still being used despite not being the primary hostinfo for this vpn ip
+                    // Keep tracking so that we can tear it down when it goes away
+                    n.Out(hostinfo.localIndexId)
+                }
+            }
+
             continue
         }

@@ -205,80 +220,73 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte)
             WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
             Debug("Tunnel status")

-        if hostinfo != nil && hostinfo.ConnectionState != nil {
+        if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
             // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
-            n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out)
+            n.intf.sendMessageToVpnIp(header.Test, header.TestRequest, hostinfo, p, nb, out)
         } else {
-            hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp)
+            hostinfo.logger(n.l).Debugf("Hostinfo sadness")
         }
-        n.AddPendingDeletion(vpnIp)
+        n.AddPendingDeletion(localIndex)
     }
 }

 func (n *connectionManager) HandleDeletionTick(now time.Time) {
-    n.pendingDeletionTimer.advance(now)
+    n.pendingDeletionTimer.Advance(now)
     for {
-        ep := n.pendingDeletionTimer.Purge()
-        if ep == nil {
+        localIndex, has := n.pendingDeletionTimer.Purge()
+        if !has {
             break
         }

-        vpnIp := ep.(iputil.VpnIp)
-        hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
+        hostinfo, err := n.hostMap.QueryIndex(localIndex)
         if err != nil {
-            n.l.Debugf("Not found in hostmap: %s", vpnIp)
-            n.ClearIP(vpnIp)
-            n.ClearPendingDeletion(vpnIp)
+            n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
+            n.ClearLocalIndex(localIndex)
+            n.ClearPendingDeletion(localIndex)
             continue
         }

-        if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
+        if n.handleInvalidCertificate(now, hostinfo) {
             continue
         }

         // If we saw an incoming packets from this ip and peer's certificate is not
         // expired, just ignore.
-        traf := n.CheckIn(vpnIp)
+        traf := n.CheckIn(localIndex)
         if traf {
-            n.l.WithField("vpnIp", vpnIp).
+            hostinfo.logger(n.l).
                 WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
                 Debug("Tunnel status")

-            n.ClearIP(vpnIp)
-            n.ClearPendingDeletion(vpnIp)
+            n.ClearLocalIndex(localIndex)
+            n.ClearPendingDeletion(localIndex)
             continue
         }

         // If it comes around on deletion wheel and hasn't resolved itself, delete
-        if n.checkPendingDeletion(vpnIp) {
+        if n.checkPendingDeletion(localIndex) {
             cn := ""
             if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
                 cn = hostinfo.ConnectionState.peerCert.Details.Name
             }

             hostinfo.logger(n.l).
                 WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
                 WithField("certName", cn).
                 Info("Tunnel status")

-            n.ClearIP(vpnIp)
-            n.ClearPendingDeletion(vpnIp)
-            // TODO: This is only here to let tests work. Should do proper mocking
-            if n.intf.lightHouse != nil {
-                n.intf.lightHouse.DeleteVpnIp(vpnIp)
-            }
             n.hostMap.DeleteHostInfo(hostinfo)
-        } else {
-            n.ClearIP(vpnIp)
-            n.ClearPendingDeletion(vpnIp)
         }
+
+        n.ClearLocalIndex(localIndex)
+        n.ClearPendingDeletion(localIndex)
     }
 }

 // handleInvalidCertificates will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid
-func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool {
+func (n *connectionManager) handleInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
     if !n.intf.disconnectInvalid {
         return false
     }
@@ -294,8 +302,7 @@ func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil
     }

     fingerprint, _ := remoteCert.Sha256Sum()
-    n.l.WithField("vpnIp", vpnIp).WithError(err).
-        WithField("certName", remoteCert.Details.Name).
+    hostinfo.logger(n.l).WithError(err).
         WithField("fingerprint", fingerprint).
         Info("Remote certificate is no longer valid, tearing down the tunnel")

@@ -303,7 +310,7 @@ func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil
     n.intf.sendCloseTunnel(hostinfo)
     n.intf.closeTunnel(hostinfo)

-    n.ClearIP(vpnIp)
-    n.ClearPendingDeletion(vpnIp)
+    n.ClearLocalIndex(hostinfo.localIndexId)
+    n.ClearPendingDeletion(hostinfo.localIndexId)
    return true
 }
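
Two things drive this refactor: the interface{}-based `SystemTimerWheel` is replaced by the generic `LockingTimerWheel[uint32]`, and the manager now tracks tunnels by `localIndexId` rather than vpn IP, so the duplicate tunnels created by a handshake race are visible individually. The promotion rule above is deterministic, which keeps both peers converging on the same surviving tunnel; a toy illustration of the tie-break (the `promote` helper is illustrative, not the diff's API):

```go
package main

import "fmt"

// promote mirrors the tie-break in HandleMonitorTick above: traffic seen on a
// non-primary tunnel is only promoted by the peer whose remote vpn IP compares
// greater than its own. Exactly one side of any pair satisfies that, so both
// peers settle on the same tunnel instead of flapping.
func promote(myVpnIp, peerVpnIp uint32) bool {
	return peerVpnIp > myVpnIp
}

func main() {
	a, b := uint32(0x0a000001), uint32(0x0a000002) // 10.0.0.1 and 10.0.0.2
	fmt.Println(promote(a, b))                     // true: 10.0.0.1 promotes the raced tunnel
	fmt.Println(promote(b, a))                     // false: 10.0.0.2 just keeps tracking it
}
```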

connection_manager_test.go
@@ -18,6 +18,20 @@ import (

 var vpnIp iputil.VpnIp

+func newTestLighthouse() *LightHouse {
+    lh := &LightHouse{
+        l:       test.NewLogger(),
+        addrMap: map[iputil.VpnIp]*RemoteList{},
+    }
+    lighthouses := map[iputil.VpnIp]struct{}{}
+    staticList := map[iputil.VpnIp]struct{}{}
+
+    lh.lighthouses.Store(&lighthouses)
+    lh.staticList.Store(&staticList)
+
+    return lh
+}
+
 func Test_NewConnectionManagerTest(t *testing.T) {
     l := test.NewLogger()
     //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
@@ -35,7 +49,7 @@ func Test_NewConnectionManagerTest(t *testing.T) {
         rawCertificateNoKey: []byte{},
     }

-    lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
+    lh := newTestLighthouse()
     ifce := &Interface{
         hostMap: hostMap,
         inside:  &test.NoopTun{},
@@ -57,16 +71,22 @@ func Test_NewConnectionManagerTest(t *testing.T) {
     out := make([]byte, mtu)
     nc.HandleMonitorTick(now, p, nb, out)

     // Add an ip we have established a connection w/ to hostmap
-    hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
+    hostinfo := &HostInfo{
+        vpnIp:         vpnIp,
+        localIndexId:  1099,
+        remoteIndexId: 9901,
+    }
     hostinfo.ConnectionState = &ConnectionState{
         certState: cs,
         H:         &noise.HandshakeState{},
     }
+    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

     // We saw traffic out to vpnIp
-    nc.Out(vpnIp)
-    assert.NotContains(t, nc.pendingDeletion, vpnIp)
-    assert.Contains(t, nc.hostMap.Hosts, vpnIp)
+    nc.Out(hostinfo.localIndexId)
+    assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
+    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
+    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

     // Move ahead 5s. Nothing should happen
     next_tick := now.Add(5 * time.Second)
     nc.HandleMonitorTick(next_tick, p, nb, out)
@@ -76,16 +96,17 @@ func Test_NewConnectionManagerTest(t *testing.T) {
     nc.HandleMonitorTick(next_tick, p, nb, out)
     nc.HandleDeletionTick(next_tick)

     // This host should now be up for deletion
-    assert.Contains(t, nc.pendingDeletion, vpnIp)
-    assert.Contains(t, nc.hostMap.Hosts, vpnIp)
+    assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
+    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
+    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

     // Move ahead some more
     next_tick = now.Add(45 * time.Second)
     nc.HandleMonitorTick(next_tick, p, nb, out)
     nc.HandleDeletionTick(next_tick)

     // The host should be evicted
-    assert.NotContains(t, nc.pendingDeletion, vpnIp)
-    assert.NotContains(t, nc.hostMap.Hosts, vpnIp)
+    assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
+    assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
+    assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 }

 func Test_NewConnectionManagerTest2(t *testing.T) {
@@ -104,7 +125,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
         rawCertificateNoKey: []byte{},
     }

-    lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
+    lh := newTestLighthouse()
     ifce := &Interface{
         hostMap: hostMap,
         inside:  &test.NoopTun{},
@@ -126,14 +147,19 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
     out := make([]byte, mtu)
     nc.HandleMonitorTick(now, p, nb, out)

     // Add an ip we have established a connection w/ to hostmap
-    hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
+    hostinfo := &HostInfo{
+        vpnIp:         vpnIp,
+        localIndexId:  1099,
+        remoteIndexId: 9901,
+    }
     hostinfo.ConnectionState = &ConnectionState{
         certState: cs,
         H:         &noise.HandshakeState{},
     }
+    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

     // We saw traffic out to vpnIp
-    nc.Out(vpnIp)
+    nc.Out(hostinfo.localIndexId)
     assert.NotContains(t, nc.pendingDeletion, vpnIp)
     assert.Contains(t, nc.hostMap.Hosts, vpnIp)

     // Move ahead 5s. Nothing should happen
@@ -145,18 +171,19 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
     nc.HandleMonitorTick(next_tick, p, nb, out)
     nc.HandleDeletionTick(next_tick)

     // This host should now be up for deletion
-    assert.Contains(t, nc.pendingDeletion, vpnIp)
+    assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
     assert.Contains(t, nc.hostMap.Hosts, vpnIp)
+    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

     // We heard back this time
-    nc.In(vpnIp)
+    nc.In(hostinfo.localIndexId)

     // Move ahead some more
     next_tick = now.Add(45 * time.Second)
     nc.HandleMonitorTick(next_tick, p, nb, out)
     nc.HandleDeletionTick(next_tick)

-    // The host should be evicted
-    assert.NotContains(t, nc.pendingDeletion, vpnIp)
-    assert.Contains(t, nc.hostMap.Hosts, vpnIp)
+    // The host should not be evicted
+    assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
+    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
+    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 }

 // Check if we can disconnect the peer.
@@ -213,7 +240,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
         rawCertificateNoKey: []byte{},
     }

-    lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
+    lh := newTestLighthouse()
     ifce := &Interface{
         hostMap: hostMap,
         inside:  &test.NoopTun{},
@@ -243,13 +270,13 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
     // Check if to disconnect with invalid certificate.
     // Should be alive.
     nextTick := now.Add(45 * time.Second)
-    destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
+    destroyed := nc.handleInvalidCertificate(nextTick, hostinfo)
     assert.False(t, destroyed)

     // Move ahead 61s.
     // Check if to disconnect with invalid certificate.
     // Should be disconnected.
     nextTick = now.Add(61 * time.Second)
-    destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
+    destroyed = nc.handleInvalidCertificate(nextTick, hostinfo)
     assert.True(t, destroyed)
 }

connection_state.go
@@ -20,7 +20,7 @@ type ConnectionState struct {
     certState *CertState
     peerCert  *cert.NebulaCertificate
     initiator bool
-    atomicMessageCounter uint64
+    messageCounter atomic.Uint64
     window    *Bits
     queueLock sync.Mutex
     writeLock sync.Mutex
@@ -70,7 +70,7 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
     return json.Marshal(m{
         "certificate":     cs.peerCert,
         "initiator":       cs.initiator,
-        "message_counter": atomic.LoadUint64(&cs.atomicMessageCounter),
+        "message_counter": cs.messageCounter.Load(),
         "ready":           cs.ready,
     })
 }
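
Go 1.19 is the new minimum elsewhere in this merge, and this change adopts the `sync/atomic` wrapper types added in that release: an `atomic.Uint64` field can only be touched through atomic methods, where the old `atomicMessageCounter uint64` convention relied on every caller remembering to use the `atomic.LoadUint64`/`AddUint64` helpers. A minimal before/after sketch (the two struct types here are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type oldStyle struct {
	atomicMessageCounter uint64 // safe only if every access uses atomic.* helpers
}

type newStyle struct {
	messageCounter atomic.Uint64 // methods are always atomic; misuse won't compile
}

func main() {
	o := &oldStyle{}
	atomic.AddUint64(&o.atomicMessageCounter, 1)
	fmt.Println(atomic.LoadUint64(&o.atomicMessageCounter)) // 1

	n := &newStyle{}
	n.messageCounter.Add(1)
	fmt.Println(n.messageCounter.Load()) // 1
}
```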

control.go
@@ -5,7 +5,6 @@ import (
     "net"
     "os"
     "os/signal"
-    "sync/atomic"
     "syscall"

     "github.com/sirupsen/logrus"
@@ -62,7 +61,7 @@ func (c *Control) Start() {

 // Stop signals nebula to shutdown, returns after the shutdown is complete
 func (c *Control) Stop() {
-    // Stop the handshakeManager (and other serivces), to prevent new tunnels from
+    // Stop the handshakeManager (and other services), to prevent new tunnels from
     // being created while we're shutting them all down.
     c.cancel()
@@ -96,12 +95,21 @@ func (c *Control) RebindUDPServer() {
     c.f.rebindCount++
 }

-// ListHostmap returns details about the actual or pending (handshaking) hostmap
-func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo {
+// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
+func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
     if pendingMap {
-        return listHostMap(c.f.handshakeManager.pendingHostMap)
+        return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
     } else {
-        return listHostMap(c.f.hostMap)
+        return listHostMapHosts(c.f.hostMap)
+    }
+}
+
+// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
+func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
+    if pendingMap {
+        return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
+    } else {
+        return listHostMapIndexes(c.f.hostMap)
     }
 }

@@ -219,7 +227,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
     }

     if h.ConnectionState != nil {
-        chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter)
+        chi.MessageCounter = h.ConnectionState.messageCounter.Load()
     }

     if c := h.GetCert(); c != nil {
@@ -233,7 +241,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
     return chi
 }

-func listHostMap(hm *HostMap) []ControlHostInfo {
+func listHostMapHosts(hm *HostMap) []ControlHostInfo {
     hm.RLock()
     hosts := make([]ControlHostInfo, len(hm.Hosts))
     i := 0
@@ -245,3 +253,16 @@ func listHostMap(hm *HostMap) []ControlHostInfo {
     return hosts
 }
+
+func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
+    hm.RLock()
+    hosts := make([]ControlHostInfo, len(hm.Indexes))
+    i := 0
+    for _, v := range hm.Indexes {
+        hosts[i] = copyHostInfo(v, hm.preferredRanges)
+        i++
+    }
+    hm.RUnlock()
+    return hosts
+}
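
The two list functions intentionally differ in what they count: `Hosts` is keyed by vpn IP and holds one primary entry per peer, while `Indexes` is keyed by local index and holds one entry per tunnel, so the counts diverge exactly when a peer has extra (raced) tunnels. A toy sketch of that relationship, with hypothetical types:

```go
package main

import "fmt"

type hostInfo struct {
	vpnIp        string
	localIndexId uint32
}

func main() {
	// One peer (10.0.0.2) with two live tunnels after a handshake race.
	a := &hostInfo{vpnIp: "10.0.0.2", localIndexId: 1099}
	b := &hostInfo{vpnIp: "10.0.0.2", localIndexId: 2817}

	hosts := map[string]*hostInfo{a.vpnIp: a} // one primary entry per peer
	indexes := map[uint32]*hostInfo{          // one entry per tunnel
		a.localIndexId: a,
		b.localIndexId: b,
	}

	fmt.Println(len(hosts), len(indexes)) // 1 2
}
```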

control_tester.go
@@ -6,6 +6,8 @@ package nebula
 import (
     "net"

+    "github.com/slackhq/nebula/cert"
+
     "github.com/google/gopacket"
     "github.com/google/gopacket/layers"
     "github.com/slackhq/nebula/header"
@@ -14,7 +16,7 @@ import (
     "github.com/slackhq/nebula/udp"
 )

-// WaitForTypeByIndex will pipe all messages from this control device into the pipeTo control device
+// WaitForType will pipe all messages from this control device into the pipeTo control device
 // returning after a message matching the criteria has been piped
 func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
     h := &header.H{}
@@ -153,3 +155,11 @@ func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
     c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
     return true
 }
+
+func (c *Control) GetHostmap() *HostMap {
+    return c.f.hostMap
+}
+
+func (c *Control) GetCert() *cert.NebulaCertificate {
+    return c.f.certState.certificate
+}

dist/arch/nebula.service
@@ -1,6 +1,6 @@
 [Unit]
-Description=nebula
-Wants=basic.target network-online.target
+Description=Nebula overlay networking tool
+Wants=basic.target network-online.target nss-lookup.target time-sync.target
 After=basic.target network.target network-online.target

 [Service]
View File

@ -1,15 +1,14 @@
[Unit] [Unit]
Description=Nebula overlay networking tool Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target After=basic.target network.target network-online.target
Before=sshd.service Before=sshd.service
Wants=basic.target network-online.target
[Service] [Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always Restart=always
SyslogIdentifier=nebula
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target

dns_server.go
@@ -4,6 +4,7 @@ import (
     "fmt"
     "net"
     "strconv"
+    "strings"
     "sync"

     "github.com/miekg/dns"
@@ -33,11 +34,10 @@ func newDnsRecords(hostMap *HostMap) *dnsRecords {
 func (d *dnsRecords) Query(data string) string {
     d.RLock()
-    if r, ok := d.dnsMap[data]; ok {
-        d.RUnlock()
+    defer d.RUnlock()
+    if r, ok := d.dnsMap[strings.ToLower(data)]; ok {
         return r
     }
-    d.RUnlock()
     return ""
 }

@@ -62,8 +62,8 @@ func (d *dnsRecords) QueryCert(data string) string {
 func (d *dnsRecords) Add(host, data string) {
     d.Lock()
-    d.dnsMap[host] = data
-    d.Unlock()
+    defer d.Unlock()
+    d.dnsMap[strings.ToLower(host)] = data
 }

 func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
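
DNS names are case-insensitive on the wire (and some resolvers randomize query case, as in the DNS 0x20 hardening trick), so the map is now normalized with `strings.ToLower` on both insert and lookup; switching to `defer` also keeps the lock released on every return path. The same pattern in a self-contained sketch (the `records` type here is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

type records struct {
	sync.RWMutex
	m map[string]string
}

// Add stores the record under a lowercased key so lookups are case-insensitive.
func (r *records) Add(host, data string) {
	r.Lock()
	defer r.Unlock()
	r.m[strings.ToLower(host)] = data
}

// Query lowercases the incoming name before the lookup; the zero value ""
// is returned for misses, matching the behavior in the diff.
func (r *records) Query(host string) string {
	r.RLock()
	defer r.RUnlock()
	return r.m[strings.ToLower(host)]
}

func main() {
	r := &records{m: map[string]string{}}
	r.Add("Host1.Example.", "10.0.0.1")
	fmt.Println(r.Query("hOsT1.eXaMpLe.")) // 10.0.0.1
}
```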

View File

@ -19,10 +19,10 @@ import (
func BenchmarkHotPath(b *testing.B) { func BenchmarkHotPath(b *testing.B) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) myControl, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse // Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Start the servers // Start the servers
myControl.Start() myControl.Start()
@ -32,7 +32,7 @@ func BenchmarkHotPath(b *testing.B) {
r.CancelFlowLogs() r.CancelFlowLogs()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
_ = r.RouteForAllUntilTxTun(theirControl) _ = r.RouteForAllUntilTxTun(theirControl)
} }
@ -42,18 +42,18 @@ func BenchmarkHotPath(b *testing.B) {
func TestGoodHandshake(t *testing.T) { func TestGoodHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse // Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Start the servers // Start the servers
myControl.Start() myControl.Start()
theirControl.Start() theirControl.Start()
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side") t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
t.Log("Have them consume my stage 0 packet. They have a tunnel now") t.Log("Have them consume my stage 0 packet. They have a tunnel now")
theirControl.InjectUDPPacket(myControl.GetFromUDP(true)) theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
@ -74,17 +74,18 @@ func TestGoodHandshake(t *testing.T) {
myControl.WaitForType(1, 0, theirControl) myControl.WaitForType(1, 0, theirControl)
t.Log("Make sure our host infos are correct") t.Log("Make sure our host infos are correct")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl) assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
t.Log("Get that cached packet and make sure it looks right") t.Log("Get that cached packet and make sure it looks right")
myCachedPacket := theirControl.GetFromTun(true) myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
t.Log("Do a bidirectional tunnel test") t.Log("Do a bidirectional tunnel test")
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow() defer r.RenderFlow()
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop() myControl.Stop()
theirControl.Stop() theirControl.Stop()
//TODO: assert hostmaps //TODO: assert hostmaps
@ -96,15 +97,15 @@ func TestWrongResponderHandshake(t *testing.T) {
// The IPs here are chosen on purpose: // The IPs here are chosen on purpose:
// The current remote handling will sort by preference, public, and then lexically. // The current remote handling will sort by preference, public, and then lexically.
// So we need them to have a higher address than evil (we could apply a preference though) // So we need them to have a higher address than evil (we could apply a preference though)
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil) myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil) theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil) evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
// Add their real udp addr, which should be tried after evil. // Add their real udp addr, which should be tried after evil.
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse. // Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl, evilControl) r := router.NewR(t, myControl, theirControl, evilControl)
@ -116,7 +117,7 @@ func TestWrongResponderHandshake(t *testing.T) {
evilControl.Start() evilControl.Start()
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them") t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType { r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
h := &header.H{} h := &header.H{}
err := h.Parse(p.Data) err := h.Parse(p.Data)
@ -135,34 +136,38 @@ func TestWrongResponderHandshake(t *testing.T) {
t.Log("My cached packet should be received by them") t.Log("My cached packet should be received by them")
myCachedPacket := theirControl.GetFromTun(true) myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
t.Log("Test the tunnel with them") t.Log("Test the tunnel with them")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl) assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Flush all packets from all controllers") t.Log("Flush all packets from all controllers")
r.FlushAll() r.FlushAll()
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil") t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil") assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil") assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil")
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete //NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
//TODO: assert hostmaps for everyone //TODO: assert hostmaps for everyone
r.RenderHostmaps("Final hostmaps", myControl, theirControl, evilControl)
t.Log("Success!") t.Log("Success!")
myControl.Stop() myControl.Stop()
theirControl.Stop() theirControl.Stop()
} }
func Test_Case1_Stage1Race(t *testing.T) { func TestStage1Race(t *testing.T) {
// This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow
// But will eventually collapse down to a single tunnel
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse and vice versa // Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
@ -173,8 +178,8 @@ func Test_Case1_Stage1Race(t *testing.T) {
theirControl.Start() theirControl.Start()
t.Log("Trigger a handshake to start on both me and them") t.Log("Trigger a handshake to start on both me and them")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them")) theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
t.Log("Get both stage 1 handshake packets") t.Log("Get both stage 1 handshake packets")
myHsForThem := myControl.GetFromUDP(true) myHsForThem := myControl.GetFromUDP(true)
@ -183,43 +188,165 @@ func Test_Case1_Stage1Race(t *testing.T) {
r.Log("Now inject both stage 1 handshake packets") r.Log("Now inject both stage 1 handshake packets")
r.InjectUDPPacket(theirControl, myControl, theirHsForMe) r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
r.InjectUDPPacket(myControl, theirControl, myHsForThem) r.InjectUDPPacket(myControl, theirControl, myHsForThem)
//TODO: they should win, grab their index for me and make sure I use it in the end.
r.Log("They should not have a stage 2 (won the race) but I should send one") r.Log("Route until they receive a message packet")
r.InjectUDPPacket(myControl, theirControl, myControl.GetFromUDP(true)) myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.Log("Route for me until I send a message packet to them") r.Log("Their cached packet should be received by me")
r.RouteForAllUntilAfterMsgTypeTo(theirControl, header.Message, header.MessageNone) theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
t.Log("My cached packet should be received by them") r.Log("Do a bidirectional tunnel test")
myCachedPacket := theirControl.GetFromTun(true) assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
t.Log("Route for them until I send a message packet to me") myHostmapHosts := myControl.ListHostmapHosts(false)
theirControl.WaitForType(1, 0, myControl) myHostmapIndexes := myControl.ListHostmapIndexes(false)
theirHostmapHosts := theirControl.ListHostmapHosts(false)
theirHostmapIndexes := theirControl.ListHostmapIndexes(false)
t.Log("Their cached packet should be received by me") // We should have two tunnels on both sides
theirCachedPacket := myControl.GetFromTun(true) assert.Len(t, myHostmapHosts, 1)
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80) assert.Len(t, theirHostmapHosts, 1)
assert.Len(t, myHostmapIndexes, 2)
assert.Len(t, theirHostmapIndexes, 2)
t.Log("Do a bidirectional tunnel test") r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
r.Log("Spin until connection manager tears down a tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
// We should only have a single tunnel now on both sides
assert.Len(t, myFinalHostmapHosts, 1)
assert.Len(t, theirFinalHostmapHosts, 1)
assert.Len(t, myFinalHostmapIndexes, 1)
assert.Len(t, theirFinalHostmapIndexes, 1)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop() myControl.Stop()
theirControl.Stop() theirControl.Stop()
//TODO: assert hostmaps }
func TestUncleanShutdownRaceLoser(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
r.Log("Trigger a handshake from me to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.Log("Nuke my hostmap")
myHostmap := myControl.GetHostmap()
myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
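// (Dropping Hosts, Indexes, and RemoteIndexes wholesale stands in for an unclean shutdown:
// my tunnel state is gone while their side still holds a live entry pointing at me.)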
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again"))
p = r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.Log("Assert the tunnel works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.Log("Wait for the dead index to go away")
start := len(theirControl.GetHostmap().Indexes)
for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
if len(theirControl.GetHostmap().Indexes) < start {
break
}
time.Sleep(time.Second)
}
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
}
func TestUncleanShutdownRaceWinner(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Teach me how to get to them and them how to get to me
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
r.Log("Trigger a handshake from me to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
r.Log("Nuke my hostmap")
theirHostmap := theirControl.GetHostmap()
theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again"))
p = r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
r.Log("Assert the tunnel works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
r.Log("Wait for the dead index to go away")
start := len(myControl.GetHostmap().Indexes)
for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
if len(myControl.GetHostmap().Indexes) < start {
break
}
time.Sleep(time.Second)
}
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
}

func TestRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})

// Teach me how to get to the relay and that they can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)

// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)

@ -231,11 +358,84 @@ func TestRelays(t *testing.T) {
theirControl.Start()

t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))

p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)

//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
}
func TestStage1RaceRelays(t *testing.T) {
//NOTE: this is a race between me and the relay, resulting in a full tunnel from me to them via the relay
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach both sides how to get to the relay and that the other can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
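// (InjectRelays seeds what a lighthouse would normally answer at runtime: the other
// host is reachable via the relay's vpn ip. Reading based on the helper names here.)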
// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
r.Log("Trigger a handshake to start on both me and relay")
myControl.InjectTunUDPPacket(relayVpnIpNet.IP, 80, 80, []byte("Hi from me"))
relayControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from relay"))
r.Log("Get both stage 1 handshake packets")
//TODO: this is where it breaks, we need to get the hs packets for the relay not for the destination
myHsForThem := myControl.GetFromUDP(true)
relayHsForMe := relayControl.GetFromUDP(true)
r.Log("Now inject both stage 1 handshake packets")
r.InjectUDPPacket(relayControl, myControl, relayHsForMe)
r.InjectUDPPacket(myControl, relayControl, myHsForThem)
r.Log("Route for me until I send a message packet to relay")
r.RouteForAllUntilAfterMsgTypeTo(relayControl, header.Message, header.MessageNone)
r.Log("My cached packet should be received by relay")
myCachedPacket := relayControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, relayVpnIpNet.IP, 80, 80)
r.Log("Relays cached packet should be received by me")
relayCachedPacket := r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from relay"), relayCachedPacket, relayVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
r.Log("Do a bidirectional tunnel test; me and relay")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
r.Log("Create a tunnel between relay and them")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
r.RenderHostmaps("Starting hostmaps", myControl, relayControl, theirControl)
r.Log("Trigger a handshake to start from me to them via the relay")
//TODO: if we initiate a handshake from me and then assert the tunnel it will cause a relay control race that can blow up
// this is a problem that exists on master today
//myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
myControl.Stop()
theirControl.Stop()
relayControl.Stop()
//TODO: assert hostmaps
}
//TODO: add a test with many lies

View File

@ -30,7 +30,7 @@ import (
type m map[string]interface{}

// newSimpleServer creates a nebula instance with many assumptions
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr) {
l := NewTestLogger()
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
@ -101,7 +101,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
panic(err)
}

return control, vpnIpNet, &udpAddr
}

// newTestCaCert will generate a CA cert
@ -231,12 +231,12 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
// Send a packet from them to me
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
bPacket := r.RouteForAllUntilTxTun(controlA)
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)

// And once more from me to them
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
aPacket := r.RouteForAllUntilTxTun(controlB)
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
}

139
e2e/router/hostmap.go Normal file
View File

@ -0,0 +1,139 @@
//go:build e2e_testing
// +build e2e_testing
package router
import (
"fmt"
"sort"
"strings"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/iputil"
)
type edge struct {
from string
to string
dual bool
}
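// edge is one directed hostmap link between rendered nodes; dual is flipped on when the
// reverse link is also seen, letting renderHostmaps collapse the pair into a single <--> arrow.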
func renderHostmaps(controls ...*nebula.Control) string {
var lines []*edge
r := "graph TB\n"
for _, c := range controls {
sr, se := renderHostmap(c)
r += sr
for _, e := range se {
add := true
// Collapse duplicate edges into a bi-directionally connected edge
for _, ge := range lines {
if e.to == ge.from && e.from == ge.to {
add = false
ge.dual = true
break
}
}
if add {
lines = append(lines, e)
}
}
}
for _, line := range lines {
if line.dual {
r += fmt.Sprintf("\t%v <--> %v\n", line.from, line.to)
} else {
r += fmt.Sprintf("\t%v --> %v\n", line.from, line.to)
}
}
return r
}
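// For a two-host test the returned mermaid source looks roughly like the sketch below
// (illustrative only; subgraph bodies and index numbers depend on the running test):
//
//	graph TB
//		subgraph me["me (10.0.0.1)"]
//			...
//		end
//		subgraph them["them (10.0.0.2)"]
//			...
//		end
//		me.1 <--> them.2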
func renderHostmap(c *nebula.Control) (string, []*edge) {
var lines []string
var globalLines []*edge
clusterName := strings.Trim(c.GetCert().Details.Name, " ")
clusterVpnIp := c.GetCert().Details.Ips[0].IP
r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp)
hm := c.GetHostmap()
// Draw the vpn to index nodes
r += fmt.Sprintf("\t\tsubgraph %s.hosts[\"Hosts (vpn ip to index)\"]\n", clusterName)
for _, vpnIp := range sortedHosts(hm.Hosts) {
hi := hm.Hosts[vpnIp]
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, vpnIp, vpnIp)
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, hi.GetLocalIndex()))
rs := hi.GetRelayState()
for _, relayIp := range rs.CopyRelayIps() {
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
}
for _, relayIp := range rs.CopyRelayForIdxs() {
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp))
}
}
r += "\t\tend\n"
// Draw the relay hostinfos
if len(hm.Relays) > 0 {
r += fmt.Sprintf("\t\tsubgraph %s.relays[\"Relays (relay index to hostinfo)\"]\n", clusterName)
for relayIndex, hi := range hm.Relays {
r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, relayIndex, relayIndex)
lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, relayIndex, clusterName, hi.GetLocalIndex()))
}
r += "\t\tend\n"
}
// Draw the local index to relay or remote index nodes
r += fmt.Sprintf("\t\tsubgraph indexes.%s[\"Indexes (index to hostinfo)\"]\n", clusterName)
for _, idx := range sortedIndexes(hm.Indexes) {
hi := hm.Indexes[idx]
r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
_ = hi
}
r += "\t\tend\n"
// Add the edges inside this host
for _, line := range lines {
r += fmt.Sprintf("\t\t%v\n", line)
}
r += "\tend\n"
return r, globalLines
}
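// sortedHosts returns the hostmap's vpn ips in a stable (descending) order so repeated
// renders of identical state produce identical text.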
func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
keys := make([]iputil.VpnIp, 0, len(hosts))
for key := range hosts {
keys = append(keys, key)
}
sort.SliceStable(keys, func(i, j int) bool {
return keys[i] > keys[j]
})
return keys
}
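// sortedIndexes does the same for local index keys.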
func sortedIndexes(indexes map[uint32]*nebula.HostInfo) []uint32 {
keys := make([]uint32, 0, len(indexes))
for key := range indexes {
keys = append(keys, key)
}
sort.SliceStable(keys, func(i, j int) bool {
return keys[i] > keys[j]
})
return keys
}

View File

@ -10,6 +10,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"sort"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@ -22,6 +23,7 @@ import (
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
"golang.org/x/exp/maps"
) )
type R struct {
@ -40,8 +42,13 @@ type R struct {
// A map of vpn ip to the nebula control it belongs to
vpnControls map[iputil.VpnIp]*nebula.Control

ignoreFlows []ignoreFlow
flow []flowEntry

// A set of additional mermaid graphs to draw in the flow log markdown file
// Currently consisting only of hostmap renders
additionalGraphs []mermaidGraph

// All interactions are locked to help serialize behavior
sync.Mutex
@ -50,6 +57,24 @@ type R struct {
t testing.TB
}
type ignoreFlow struct {
tun NullBool
messageType header.MessageType
subType header.MessageSubType
//from
//to
}
type mermaidGraph struct {
title string
content string
}
type NullBool struct {
HasValue bool
IsTrue bool
}
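// NullBool is a tri-state flag: HasValue false means "no opinion", otherwise IsTrue
// carries the value. IgnoreFlow uses it so the tun criterion can be left unset instead
// of defaulting to false.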
type flowEntry struct {
note string
packet *packet
@ -98,6 +123,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
inNat: make(map[string]*nebula.Control),
outNat: make(map[string]net.UDPAddr),
flow: []flowEntry{},
ignoreFlows: []ignoreFlow{},
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
t: t,
cancelRender: cancel,
@ -126,6 +152,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
case <-ctx.Done():
return
case <-clockSource.C:
r.renderHostmaps("clock tick")
r.renderFlow()
}
}
@ -196,11 +223,16 @@ func (r *R) renderFlow() {
)
}

if len(participantsVals) > 2 {
// Get the first and last participantVals for notes
participantsVals = []string{participantsVals[0], participantsVals[len(participantsVals)-1]}
}

// Print packets
h := &header.H{}
for _, e := range r.flow {
if e.packet == nil {
//fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
continue
}
@ -219,15 +251,77 @@ func (r *R) renderFlow() {
}

fmt.Fprintf(f,
" %s%s%s: %s(%s), index %v, counter: %v\n",
strings.Replace(p.from.GetUDPAddr(), ":", "#58;", 1),
line,
strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
)
}
}

fmt.Fprintln(f, "```")
for _, g := range r.additionalGraphs {
fmt.Fprintf(f, "## %s\n", g.title)
fmt.Fprintln(f, "```mermaid")
fmt.Fprintln(f, g.content)
fmt.Fprintln(f, "```")
}
}
// IgnoreFlow tells the router to stop recording future flows that match the provided criteria.
// messageType and subType will target nebula underlay packets while tun will target nebula overlay packets
// NOTE: This is a very broad system, if you set tun to true then no more tun traffic will be rendered
func (r *R) IgnoreFlow(messageType header.MessageType, subType header.MessageSubType, tun NullBool) {
r.Lock()
defer r.Unlock()
r.ignoreFlows = append(r.ignoreFlows, ignoreFlow{
tun,
messageType,
subType,
})
}
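// A minimal usage sketch (hypothetical test body, reusing only identifiers that appear
// in this file): stop recording plain message packets so the flow log stays short.
//
//	r.IgnoreFlow(header.Message, header.MessageNone, NullBool{})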
func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
r.Lock()
defer r.Unlock()
s := renderHostmaps(controls...)
if len(r.additionalGraphs) > 0 {
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
if lastGraph.content == s && lastGraph.title == title {
// Ignore this rendering if it matches the last rendering added
// This is useful if you want to track rendering changes
return
}
}
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
title: title,
content: s,
})
}
func (r *R) renderHostmaps(title string) {
c := maps.Values(r.controls)
sort.SliceStable(c, func(i, j int) bool {
return c[i].GetVpnIp() > c[j].GetVpnIp()
})
s := renderHostmaps(c...)
if len(r.additionalGraphs) > 0 {
lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
if lastGraph.content == s {
// Ignore this rendering if it matches the last rendering added
// This is useful if you want to track rendering changes
return
}
}
r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
title: title,
content: s,
})
}
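// The exported RenderHostmaps records explicit, titled snapshots on demand, while this
// unexported renderHostmaps runs automatically (per injected packet and on the render
// clock) and skips graphs identical to the last one recorded.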
// InjectFlow can be used to record packet flow if the test is handling the routing on its own.
@ -268,6 +362,26 @@ func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool
return nil
}
r.renderHostmaps(fmt.Sprintf("Packet %v", len(r.flow)))
if len(r.ignoreFlows) > 0 {
var h header.H
err := h.Parse(p.Data)
if err != nil {
panic(err)
}
for _, i := range r.ignoreFlows {
if !tun {
if i.messageType == h.Type && i.subType == h.Subtype {
return nil
}
} else if i.tun.HasValue && i.tun.IsTrue {
return nil
}
}
}
fp := &packet{ fp := &packet{
from: from, from: from,
to: to, to: to,

View File

@ -47,8 +47,9 @@ lighthouse:
# allowed. You can provide CIDRs here with `true` to allow and `false` to
# deny. The most specific CIDR rule applies to each remote. If all rules are
# "allow", the default will be "deny", and vice-versa. If both "allow" and
# "deny" IPv4 rules are present, then you MUST set a rule for "0.0.0.0/0" as
# the default. Similarly if both "allow" and "deny" IPv6 rules are present,
# then you MUST set a rule for "::/0" as the default.
#remote_allow_list:
# Example to block IPs from this subnet from being used for remote IPs.
#"172.16.0.0/12": false
@ -58,7 +59,7 @@ lighthouse:
#"10.0.0.0/8": false
#"10.42.42.0/24": true

# EXPERIMENTAL: This option may change or disappear in the future.
# Optionally allows the definition of remote_allow_list blocks
# specific to an inside VPN IP CIDR.
#remote_allow_ranges:
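# For example (illustrative only), mixing IPv4 allow and deny rules means stating the
# default explicitly:
#remote_allow_list:
#"0.0.0.0/0": true
#"192.168.0.0/16": false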
@ -133,7 +134,7 @@ punchy:
# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
#cipher: aes

# Preferred ranges are used to define a hint about the local network ranges, which speeds up discovering the fastest
# path to a network adjacent nebula node.

View File

@ -1,7 +1,8 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]
SyslogIdentifier=nebula

View File

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>KeepAlive</key>
<true/>
<key>Label</key>
<string>net.defined.nebula</string>
<key>WorkingDirectory</key>
<string>/Users/{username}/.local/bin/nebula</string>
<key>LimitLoadToSessionType</key>
<array>
<string>Aqua</string>
<string>Background</string>
<string>LoginWindow</string>
<string>StandardIO</string>
<string>System</string>
</array>
<key>ProgramArguments</key>
<array>
<string>./nebula</string>
<string>-config</string>
<string>./config.yml</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>StandardErrorPath</key>
<string>./nebula.log</string>
<key>StandardOutPath</key>
<string>./nebula.log</string>
<key>UserName</key>
<string>root</string>
</dict>
</plist>

View File

@ -1,7 +1,7 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]

View File

@ -77,7 +77,7 @@ type FirewallConntrack struct {
sync.Mutex

Conns map[firewall.Packet]*conn
TimerWheel *TimerWheel[firewall.Packet]
}

type FirewallTable struct {
@ -145,7 +145,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
return &Firewall{ return &Firewall{
Conntrack: &FirewallConntrack{ Conntrack: &FirewallConntrack{
Conns: make(map[firewall.Packet]*conn), Conns: make(map[firewall.Packet]*conn),
TimerWheel: NewTimerWheel(min, max), TimerWheel: NewTimerWheel[firewall.Packet](min, max),
}, },
InRules: newFirewallTable(), InRules: newFirewallTable(),
OutRules: newFirewallTable(), OutRules: newFirewallTable(),
@ -510,6 +510,7 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
conntrack := f.Conntrack
conntrack.Lock()

if _, ok := conntrack.Conns[fp]; !ok {
conntrack.TimerWheel.Advance(time.Now())
conntrack.TimerWheel.Add(fp, timeout)
}
@ -537,6 +538,7 @@ func (f *Firewall) evict(p firewall.Packet) {
// Timeout is in the future, re-add the timer
if newT > 0 {
conntrack.TimerWheel.Advance(time.Now())
conntrack.TimerWheel.Add(p, newT)
return
}
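// Note the Advance(time.Now()) now paired with each Add (in both addConn and evict):
// moving the wheel to the current tick first should keep the new timeout bucketed
// relative to now rather than to the last time the wheel advanced.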
@ -879,7 +881,7 @@ func parsePort(s string) (startPort, endPort int32, err error) {
return
}

// TODO: write tests for these
func setTCPRTTTracking(c *conn, p []byte) {
if c.Seq != 0 {
return

View File

@ -13,7 +13,7 @@ type ConntrackCache map[Packet]struct{}
type ConntrackCacheTicker struct {
cacheV uint64
cacheTick atomic.Uint64

cache ConntrackCache
}
@ -35,7 +35,7 @@ func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
func (c *ConntrackCacheTicker) tick(d time.Duration) {
for {
time.Sleep(d)
c.cacheTick.Add(1)
}
}
@ -45,7 +45,7 @@ func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
if c == nil {
return nil
}
if tick := c.cacheTick.Load(); tick != c.cacheV {
c.cacheV = tick
if ll := len(c.cache); ll > 0 {
if l.Level == logrus.DebugLevel {
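// The counter migration above is the Go 1.19 sync/atomic idiom: a bare uint64 plus
// atomic.AddUint64/LoadUint64 becomes the atomic.Uint64 type, e.g.
//
//	var tick atomic.Uint64
//	tick.Add(1)
//	current := tick.Load()
//
// which keeps every access atomic by construction.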

View File

@ -34,27 +34,27 @@ func TestNewFirewall(t *testing.T) {
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)

fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
}

func TestFirewall_AddRule(t *testing.T) {

42
go.mod
View File

@ -1,6 +1,6 @@
module github.com/slackhq/nebula

go 1.19

require (
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
@ -9,23 +9,24 @@ require (
github.com/flynn/noise v1.0.0
github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19
github.com/imdario/mergo v0.3.13
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.50
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.14.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.0
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/stretchr/testify v1.8.1
github.com/vishvananda/netlink v1.1.0
golang.org/x/crypto v0.3.0
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
golang.org/x/net v0.2.0
golang.org/x/sys v0.2.0
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/protobuf v1.28.1
gopkg.in/yaml.v2 v2.4.0
)
@ -34,15 +35,14 @@ require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/vishvananda/netns v0.0.1 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

99
go.sum
View File

@ -119,8 +119,8 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
@ -139,8 +139,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -150,8 +150,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -163,12 +163,11 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@ -187,57 +186,62 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.1 h1:JDkWS7Axy5ziNM3svylLhpSgqjPDb+BgVUbXoDo+iPw=
github.com/vishvananda/netns v0.0.1/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@ -250,10 +254,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -264,6 +266,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -285,8 +289,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -320,14 +324,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -345,8 +345,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -361,7 +361,6 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -383,7 +382,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -392,16 +390,15 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 h1:PRD0hj6tTuUnCFD08vkvjkYFbQg/9lV8KIxe1y4/cvU= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -409,7 +406,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -456,13 +452,11 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY= golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY=
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
@ -543,8 +537,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -560,8 +554,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -1,7 +1,6 @@
package nebula package nebula
import ( import (
"sync/atomic"
"time" "time"
"github.com/flynn/noise" "github.com/flynn/noise"
@ -60,7 +59,7 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
} }
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1) h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
atomic.AddUint64(&ci.atomicMessageCounter, 1) ci.messageCounter.Add(1)
msg, _, _, err := ci.H.WriteMessage(h, hsBytes) msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
if err != nil { if err != nil {
@ -243,9 +242,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
hostinfo.SetRemote(addr) hostinfo.SetRemote(addr)
hostinfo.CreateRemoteCIDR(remoteCert) hostinfo.CreateRemoteCIDR(remoteCert)
// Only overwrite existing record if we should win the handshake race existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
overwrite := vpnIp > f.myVpnIp
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f)
if err != nil { if err != nil {
switch err { switch err {
case ErrAlreadySeen: case ErrAlreadySeen:
@ -323,16 +320,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp). WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
Error("Failed to add HostInfo due to localIndex collision") Error("Failed to add HostInfo due to localIndex collision")
return return
case ErrExistingHandshake:
// We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Prevented a pending handshake race")
return
default: default:
// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete // Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
// And we forget to update it here // And we forget to update it here
@ -394,6 +381,12 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
Info("Handshake message sent") Info("Handshake message sent")
} }
if existing != nil {
// Make sure we are tracking the old primary if there was one, it needs to go away eventually
f.connectionManager.Out(existing.localIndexId)
}
f.connectionManager.Out(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics) hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
return return
@ -571,8 +564,12 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
hostinfo.CreateRemoteCIDR(remoteCert) hostinfo.CreateRemoteCIDR(remoteCert)
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp // Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
//TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok? existing := f.handshakeManager.Complete(hostinfo, f)
f.handshakeManager.Complete(hostinfo, f) if existing != nil {
// Make sure we are tracking the old primary if there was one, it needs to go away eventually
f.connectionManager.Out(existing.localIndexId)
}
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics) hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
f.metricHandshakes.Update(duration) f.metricHandshakes.Update(duration)

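Note on the counter change above: this replaces sync/atomic helper calls on a bare uint64 field with the typed atomics added in Go 1.19. A minimal sketch of the two styles; the struct and field names here are illustrative, not taken from the tree:

package main

import (
	"fmt"
	"sync/atomic"
)

// pre-Go-1.19 style: a plain uint64 that every caller must remember to
// touch only through atomic.AddUint64 / atomic.LoadUint64
type oldState struct {
	atomicMessageCounter uint64
}

// Go 1.19 style used by this change: the typed wrapper makes an
// unsynchronized access impossible to write by accident
type newState struct {
	messageCounter atomic.Uint64
}

func main() {
	o := &oldState{}
	atomic.AddUint64(&o.atomicMessageCounter, 1)

	n := &newState{}
	n.messageCounter.Add(1)

	fmt.Println(atomic.LoadUint64(&o.atomicMessageCounter), n.messageCounter.Load())
}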
View File

@ -47,7 +47,7 @@ type HandshakeManager struct {
lightHouse *LightHouse lightHouse *LightHouse
outside *udp.Conn outside *udp.Conn
config HandshakeConfig config HandshakeConfig
OutboundHandshakeTimer *SystemTimerWheel OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
messageMetrics *MessageMetrics messageMetrics *MessageMetrics
metricInitiated metrics.Counter metricInitiated metrics.Counter
metricTimedOut metrics.Counter metricTimedOut metrics.Counter
@ -56,6 +56,10 @@ type HandshakeManager struct {
multiPort MultiPortConfig multiPort MultiPortConfig
udpRaw *udp.RawConn udpRaw *udp.RawConn
// vpnIps is another map similar to the pending hostmap, but it tracks entries in the timer wheel instead. // vpnIps is another map similar to the pending hostmap, but it tracks entries in the timer wheel instead.
// This avoids situations where the same vpn ip re-enters the wheel and causes rapid-fire handshaking. // This avoids situations where the same vpn ip re-enters the wheel and causes rapid-fire handshaking.
vpnIps map[iputil.VpnIp]struct{}
// can be used to trigger outbound handshake for the given vpnIp // can be used to trigger outbound handshake for the given vpnIp
trigger chan iputil.VpnIp trigger chan iputil.VpnIp
} }
@ -68,7 +72,8 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
outside: outside, outside: outside,
config: config, config: config,
trigger: make(chan iputil.VpnIp, config.triggerBuffer), trigger: make(chan iputil.VpnIp, config.triggerBuffer),
OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)), OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
vpnIps: map[iputil.VpnIp]struct{}{},
messageMetrics: config.messageMetrics, messageMetrics: config.messageMetrics,
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil), metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil), metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
@ -93,13 +98,12 @@ func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
} }
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) { func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) {
c.OutboundHandshakeTimer.advance(now) c.OutboundHandshakeTimer.Advance(now)
for { for {
ep := c.OutboundHandshakeTimer.Purge() vpnIp, has := c.OutboundHandshakeTimer.Purge()
if ep == nil { if !has {
break break
} }
vpnIp := ep.(iputil.VpnIp)
c.handleOutbound(vpnIp, f, false) c.handleOutbound(vpnIp, f, false)
} }
} }
@ -107,6 +111,7 @@ func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.E
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) { func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) {
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp) hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil { if err != nil {
delete(c.vpnIps, vpnIp)
return return
} }
hostinfo.Lock() hostinfo.Lock()
@ -150,7 +155,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
// Get a remotes object if we don't already have one. // Get a remotes object if we don't already have one.
// This is mainly to protect us as this should never be the case // This is mainly to protect us as this should never be the case
// NB ^ This comment doesn't jive. It's how the thing gets intiailized. // NB ^ This comment doesn't jive. It's how the thing gets initialized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info? // It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil { if hostinfo.remotes == nil {
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp) hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
@ -164,7 +169,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
c.lightHouse.QueryServer(vpnIp, f) c.lightHouse.QueryServer(vpnIp, f)
} }
// Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply // Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
var sentTo []*udp.Addr var sentTo []*udp.Addr
var sentMultiport bool var sentMultiport bool
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) { hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
@ -287,7 +292,6 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add // If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
if !lighthouseTriggered { if !lighthouseTriggered {
//TODO: feel like we dupe handshake real fast in a tight loop, why?
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter)) c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
} }
} }
@ -296,7 +300,10 @@ func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *H
hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init) hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
if created { if created {
if _, ok := c.vpnIps[vpnIp]; !ok {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval) c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
}
c.vpnIps[vpnIp] = struct{}{}
c.metricInitiated.Inc(1) c.metricInitiated.Inc(1)
} }
@ -307,7 +314,6 @@ var (
ErrExistingHostInfo = errors.New("existing hostinfo") ErrExistingHostInfo = errors.New("existing hostinfo")
ErrAlreadySeen = errors.New("already seen") ErrAlreadySeen = errors.New("already seen")
ErrLocalIndexCollision = errors.New("local index collision") ErrLocalIndexCollision = errors.New("local index collision")
ErrExistingHandshake = errors.New("existing handshake")
) )
// CheckAndComplete checks for any conflicts in the main and pending hostmap // CheckAndComplete checks for any conflicts in the main and pending hostmap
@ -321,7 +327,7 @@ var (
// //
// ErrLocalIndexCollision if we already have an entry in the main or pending // ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId. // hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) { func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
c.pendingHostMap.Lock() c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock() defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock() c.mainHostMap.Lock()
@ -330,11 +336,16 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
// Check if we already have a tunnel with this vpn ip // Check if we already have a tunnel with this vpn ip
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp] existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
if found && existingHostInfo != nil { if found && existingHostInfo != nil {
testHostInfo := existingHostInfo
for testHostInfo != nil {
// Is it just a delayed handshake packet? // Is it just a delayed handshake packet?
if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) { if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
return existingHostInfo, ErrAlreadySeen return existingHostInfo, ErrAlreadySeen
} }
testHostInfo = testHostInfo.next
}
// Is this a newer handshake? // Is this a newer handshake?
if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime { if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime {
return existingHostInfo, ErrExistingHostInfo return existingHostInfo, ErrExistingHostInfo
@ -364,56 +375,19 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
Info("New host shadows existing host remoteIndex") Info("New host shadows existing host remoteIndex")
} }
// Check if we are also handshaking with this vpn ip c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
if found && pendingHostInfo != nil {
if !overwrite {
// We won, let our pending handshake win
return pendingHostInfo, ErrExistingHandshake
}
// We lost, take this handshake and move any cached packets over so they get sent
pendingHostInfo.ConnectionState.queueLock.Lock()
hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
pendingHostInfo.ConnectionState.queueLock.Unlock()
pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
}
if existingHostInfo != nil {
// We are going to overwrite this entry, so remove the old references
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
delete(c.mainHostMap.Relays, relayIdx)
}
}
c.mainHostMap.addHostInfo(hostinfo, f)
return existingHostInfo, nil return existingHostInfo, nil
} }
// Complete is a simpler version of CheckAndComplete when we already know we // Complete is a simpler version of CheckAndComplete when we already know we
// won't have a localIndexId collision because we already have an entry in the // won't have a localIndexId collision because we already have an entry in the
// pendingHostMap // pendingHostMap. An existing hostinfo is returned if there was one.
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) { func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) *HostInfo {
c.pendingHostMap.Lock() c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock() defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock() c.mainHostMap.Lock()
defer c.mainHostMap.Unlock() defer c.mainHostMap.Unlock()
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
if found && existingHostInfo != nil {
// We are going to overwrite this entry, so remove the old references
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
delete(c.mainHostMap.Relays, relayIdx)
}
}
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId] existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil { if found && existingRemoteIndex != nil {
// We have a collision, but this can happen since we can't control // We have a collision, but this can happen since we can't control
@ -423,8 +397,10 @@ func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
Info("New host shadows existing host remoteIndex") Info("New host shadows existing host remoteIndex")
} }
c.mainHostMap.addHostInfo(hostinfo, f) existingHostInfo := c.mainHostMap.Hosts[hostinfo.vpnIp]
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo) c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
return existingHostInfo
} }
// AddIndexHostInfo generates a unique localIndexId for this HostInfo // AddIndexHostInfo generates a unique localIndexId for this HostInfo

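Note on the two changes above: the outbound timer wheel is now generic, so Purge returns a typed (value, ok) pair instead of an interface{} that each caller had to type-assert, and the new vpnIps set guarantees at most one wheel entry per address. A rough, self-contained stand-in that assumes only the Add/Purge shape shown in the hunks (the real LockingTimerWheel buckets entries by deadline and locks around them):

package main

import "fmt"

// toy stand-in for the generic wheel; not the real data structure
type wheel[T any] struct {
	items []T
}

func (w *wheel[T]) Add(v T) { w.items = append(w.items, v) }

// Purge pops the next expired entry; the (value, ok) return replaces the
// old interface{} + type-assertion pattern
func (w *wheel[T]) Purge() (T, bool) {
	var zero T
	if len(w.items) == 0 {
		return zero, false
	}
	v := w.items[0]
	w.items = w.items[1:]
	return v, true
}

func main() {
	w := &wheel[uint32]{}
	seen := map[uint32]struct{}{} // mirrors the new vpnIps dedup set
	for _, ip := range []uint32{100, 100, 200} {
		if _, ok := seen[ip]; !ok {
			w.Add(ip) // only the first pending handshake per ip enters the wheel
		}
		seen[ip] = struct{}{}
	}
	for {
		ip, has := w.Purge()
		if !has {
			break
		}
		fmt.Println("outbound handshake tick for", ip)
	}
}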
View File

@ -21,11 +21,7 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{} mw := &mockEncWriter{}
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges) mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
lh := &LightHouse{ lh := newTestLighthouse()
atomicStaticList: make(map[iputil.VpnIp]struct{}),
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
addrMap: make(map[iputil.VpnIp]*RemoteList),
}
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig) blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
@ -79,12 +75,7 @@ func Test_NewHandshakeManagerTrigger(t *testing.T) {
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{} mw := &mockEncWriter{}
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges) mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
lh := &LightHouse{ lh := newTestLighthouse()
addrMap: make(map[iputil.VpnIp]*RemoteList),
l: l,
atomicStaticList: make(map[iputil.VpnIp]struct{}),
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
}
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig) blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
@ -115,8 +106,8 @@ func Test_NewHandshakeManagerTrigger(t *testing.T) {
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer)) assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
} }
func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) { func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
for _, i := range tw.wheel { for _, i := range tw.t.wheel {
n := i.Head n := i.Head
for n != nil { for n != nil {
c++ c++

View File

@ -18,11 +18,15 @@ import (
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
) )
//const ProbeLen = 100 // const ProbeLen = 100
const PromoteEvery = 1000 const PromoteEvery = 1000
const ReQueryEvery = 5000 const ReQueryEvery = 5000
const MaxRemotes = 10 const MaxRemotes = 10
// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
// 5 allows for an initial handshake and each host pair re-handshaking twice
const MaxHostInfosPerVpnIp = 5
// How long we should prevent roaming back to the previous IP. // How long we should prevent roaming back to the previous IP.
// This helps prevent flapping due to packets already in flight // This helps prevent flapping due to packets already in flight
const RoamingSuppressSeconds = 2 const RoamingSuppressSeconds = 2
@ -153,7 +157,7 @@ type HostInfo struct {
remote *udp.Addr remote *udp.Addr
remotes *RemoteList remotes *RemoteList
promoteCounter uint32 promoteCounter atomic.Uint32
multiportTx bool multiportTx bool
multiportRx bool multiportRx bool
ConnectionState *ConnectionState ConnectionState *ConnectionState
@ -182,6 +186,10 @@ type HostInfo struct {
lastRoam time.Time lastRoam time.Time
lastRoamRemote *udp.Addr lastRoamRemote *udp.Addr
// Used to track other hostinfos for this vpn ip since only 1 can be primary
// Synchronised via hostmap lock and not the hostinfo lock.
next, prev *HostInfo
} }
type ViaSender struct { type ViaSender struct {
@ -286,7 +294,6 @@ func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (
if h, ok := hm.Hosts[vpnIp]; !ok { if h, ok := hm.Hosts[vpnIp]; !ok {
hm.RUnlock() hm.RUnlock()
h = &HostInfo{ h = &HostInfo{
promoteCounter: 0,
vpnIp: vpnIp, vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0), HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{ relayState: RelayState{
@ -398,9 +405,12 @@ func (hm *HostMap) DeleteReverseIndex(index uint32) {
} }
} }
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) { // DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
// Delete the host itself, ensuring it's not modified anymore // Delete the host itself, ensuring it's not modified anymore
hm.Lock() hm.Lock()
// If we have a previous or next hostinfo then we are not the last one for this vpn ip
final := (hostinfo.next == nil && hostinfo.prev == nil)
hm.unlockedDeleteHostInfo(hostinfo) hm.unlockedDeleteHostInfo(hostinfo)
hm.Unlock() hm.Unlock()
@ -424,6 +434,8 @@ func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) {
for _, localIdx := range teardownRelayIdx { for _, localIdx := range teardownRelayIdx {
hm.RemoveRelay(localIdx) hm.RemoveRelay(localIdx)
} }
return final
} }
func (hm *HostMap) DeleteRelayIdx(localIdx uint32) { func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
@ -432,29 +444,81 @@ func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
delete(hm.RemoteIndexes, localIdx) delete(hm.RemoteIndexes, localIdx)
} }
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) { func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
// Check if this same hostId is in the hostmap with a different instance. hm.Lock()
// This could happen if we have an entry in the pending hostmap with different defer hm.Unlock()
// index values than the one in the main hostmap. hm.unlockedMakePrimary(hostinfo)
hostinfo2, ok := hm.Hosts[hostinfo.vpnIp] }
if ok && hostinfo2 != hostinfo {
delete(hm.Hosts, hostinfo2.vpnIp) func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
delete(hm.Indexes, hostinfo2.localIndexId) oldHostinfo := hm.Hosts[hostinfo.vpnIp]
delete(hm.RemoteIndexes, hostinfo2.remoteIndexId) if oldHostinfo == hostinfo {
return
} }
if hostinfo.prev != nil {
hostinfo.prev.next = hostinfo.next
}
if hostinfo.next != nil {
hostinfo.next.prev = hostinfo.prev
}
hm.Hosts[hostinfo.vpnIp] = hostinfo
if oldHostinfo == nil {
return
}
hostinfo.next = oldHostinfo
oldHostinfo.prev = hostinfo
hostinfo.prev = nil
}
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
primary, ok := hm.Hosts[hostinfo.vpnIp]
if ok && primary == hostinfo {
// The vpnIp pointer points to the same hostinfo as the local index id, so we can remove it
delete(hm.Hosts, hostinfo.vpnIp) delete(hm.Hosts, hostinfo.vpnIp)
if len(hm.Hosts) == 0 { if len(hm.Hosts) == 0 {
hm.Hosts = map[iputil.VpnIp]*HostInfo{} hm.Hosts = map[iputil.VpnIp]*HostInfo{}
} }
delete(hm.Indexes, hostinfo.localIndexId)
if len(hm.Indexes) == 0 { if hostinfo.next != nil {
hm.Indexes = map[uint32]*HostInfo{} // We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
hm.Hosts[hostinfo.vpnIp] = hostinfo.next
// It is primary, there is no previous hostinfo now
hostinfo.next.prev = nil
} }
} else {
// Relink if we were in the middle of multiple hostinfos for this vpn ip
if hostinfo.prev != nil {
hostinfo.prev.next = hostinfo.next
}
if hostinfo.next != nil {
hostinfo.next.prev = hostinfo.prev
}
}
hostinfo.next = nil
hostinfo.prev = nil
// The remote index uses index ids outside our control, so let's make sure we are only removing
// the remote index pointer here if it points to the hostinfo we are deleting
hostinfo2, ok := hm.RemoteIndexes[hostinfo.remoteIndexId]
if ok && hostinfo2 == hostinfo {
delete(hm.RemoteIndexes, hostinfo.remoteIndexId) delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
if len(hm.RemoteIndexes) == 0 { if len(hm.RemoteIndexes) == 0 {
hm.RemoteIndexes = map[uint32]*HostInfo{} hm.RemoteIndexes = map[uint32]*HostInfo{}
} }
}
delete(hm.Indexes, hostinfo.localIndexId)
if len(hm.Indexes) == 0 {
hm.Indexes = map[uint32]*HostInfo{}
}
if hm.l.Level >= logrus.DebugLevel { if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts), hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
@ -523,15 +587,22 @@ func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*Host
return nil, errors.New("unable to find host") return nil, errors.New("unable to find host")
} }
// We already have the hm Lock when this is called, so make sure to not call // unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
// any other methods that might try to grab it again // If an entry exists for the Hosts table (vpnIp -> hostinfo) then the provided hostinfo will be made primary
func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) { func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
if f.serveDns { if f.serveDns {
remoteCert := hostinfo.ConnectionState.peerCert remoteCert := hostinfo.ConnectionState.peerCert
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String()) dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
} }
existing := hm.Hosts[hostinfo.vpnIp]
hm.Hosts[hostinfo.vpnIp] = hostinfo hm.Hosts[hostinfo.vpnIp] = hostinfo
if existing != nil {
hostinfo.next = existing
existing.prev = hostinfo
}
hm.Indexes[hostinfo.localIndexId] = hostinfo hm.Indexes[hostinfo.localIndexId] = hostinfo
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
@ -540,6 +611,16 @@ func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) {
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}). "hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
Debug("Hostmap vpnIp added") Debug("Hostmap vpnIp added")
} }
i := 1
check := hostinfo
for check != nil {
if i > MaxHostInfosPerVpnIp {
hm.unlockedDeleteHostInfo(check)
}
check = check.next
i++
}
} }
// punchList assembles a list of all non nil RemoteList pointer entries in this hostmap // punchList assembles a list of all non nil RemoteList pointer entries in this hostmap
@ -593,7 +674,7 @@ func (hm *HostMap) Punchy(ctx context.Context, conn *udp.Conn) {
// TryPromoteBest handles re-querying lighthouses and probing for better paths // TryPromoteBest handles re-querying lighthouses and probing for better paths
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients! // NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) { func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
c := atomic.AddUint32(&i.promoteCounter, 1) c := i.promoteCounter.Add(1)
if c%PromoteEvery == 0 { if c%PromoteEvery == 0 {
// The lock here is currently protecting i.remote access // The lock here is currently protecting i.remote access
i.RLock() i.RLock()
@ -660,7 +741,7 @@ func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
i.HandshakeComplete = true i.HandshakeComplete = true
//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen. //TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
// Clamping it to 2 gets us out of the woods for now // Clamping it to 2 gets us out of the woods for now
atomic.StoreUint64(&i.ConnectionState.atomicMessageCounter, 2) i.ConnectionState.messageCounter.Store(2)
if l.Level >= logrus.DebugLevel { if l.Level >= logrus.DebugLevel {
i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore)) i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
@ -767,7 +848,10 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
return logrus.NewEntry(l) return logrus.NewEntry(l)
} }
li := l.WithField("vpnIp", i.vpnIp) li := l.WithField("vpnIp", i.vpnIp).
WithField("localIndex", i.localIndexId).
WithField("remoteIndex", i.remoteIndexId)
if connState := i.ConnectionState; connState != nil { if connState := i.ConnectionState; connState != nil {
if peerCert := connState.peerCert; peerCert != nil { if peerCert := connState.peerCert; peerCert != nil {
li = li.WithField("certName", peerCert.Details.Name) li = li.WithField("certName", peerCert.Details.Name)

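Note on the hostmap change above: Hosts[vpnIp] now points at the head of a short doubly-linked chain of hostinfos for that address (capped at MaxHostInfosPerVpnIp), maintained under the hostmap lock. A compact sketch of the relink that unlockedMakePrimary performs, using a pared-down struct instead of the real HostInfo:

package main

import "fmt"

type hostInfo struct {
	localIndexId uint32
	next, prev   *hostInfo
}

type hostMap struct {
	hosts map[uint32]*hostInfo // vpnIp -> primary (head of the chain)
}

// makePrimary unlinks h from its current position and pushes it to the
// head, mirroring unlockedMakePrimary in the diff
func (hm *hostMap) makePrimary(vpnIp uint32, h *hostInfo) {
	old := hm.hosts[vpnIp]
	if old == h {
		return // already primary, nothing to relink
	}
	if h.prev != nil {
		h.prev.next = h.next
	}
	if h.next != nil {
		h.next.prev = h.prev
	}
	hm.hosts[vpnIp] = h
	if old == nil {
		return
	}
	h.next = old
	old.prev = h
	h.prev = nil
}

func main() {
	h1 := &hostInfo{localIndexId: 1}
	h2 := &hostInfo{localIndexId: 2, prev: h1}
	h1.next = h2
	hm := &hostMap{hosts: map[uint32]*hostInfo{10: h1}}

	hm.makePrimary(10, h2)
	fmt.Println(hm.hosts[10].localIndexId, hm.hosts[10].next.localIndexId) // 2 1
}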
View File

@ -1 +1,207 @@
package nebula package nebula
import (
"net"
"testing"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
)
func TestHostMap_MakePrimary(t *testing.T) {
l := test.NewLogger()
hm := NewHostMap(
l, "test",
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
},
[]*net.IPNet{},
)
f := &Interface{}
h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
hm.unlockedAddHostInfo(h4, f)
hm.unlockedAddHostInfo(h3, f)
hm.unlockedAddHostInfo(h2, f)
hm.unlockedAddHostInfo(h1, f)
// Make sure we go h1 -> h2 -> h3 -> h4
prim, _ := hm.QueryVpnIp(1)
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
assert.Nil(t, h4.next)
// Swap h3/middle to primary
hm.MakePrimary(h3)
// Make sure we go h3 -> h1 -> h2 -> h4
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h3.localIndexId, prim.localIndexId)
assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
assert.Nil(t, h4.next)
// Swap h4/tail to primary
hm.MakePrimary(h4)
// Make sure we go h4 -> h3 -> h1 -> h2
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
assert.Nil(t, h2.next)
// Swap h4 again should be no-op
hm.MakePrimary(h4)
// Make sure we go h4 -> h3 -> h1 -> h2
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
assert.Nil(t, h2.next)
}
func TestHostMap_DeleteHostInfo(t *testing.T) {
l := test.NewLogger()
hm := NewHostMap(
l, "test",
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
},
[]*net.IPNet{},
)
f := &Interface{}
h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
h5 := &HostInfo{vpnIp: 1, localIndexId: 5}
h6 := &HostInfo{vpnIp: 1, localIndexId: 6}
hm.unlockedAddHostInfo(h6, f)
hm.unlockedAddHostInfo(h5, f)
hm.unlockedAddHostInfo(h4, f)
hm.unlockedAddHostInfo(h3, f)
hm.unlockedAddHostInfo(h2, f)
hm.unlockedAddHostInfo(h1, f)
// h6 should be deleted
assert.Nil(t, h6.next)
assert.Nil(t, h6.prev)
_, err := hm.QueryIndex(h6.localIndexId)
assert.Error(t, err)
// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
prim, _ := hm.QueryVpnIp(1)
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
assert.Nil(t, h5.next)
// Delete primary
hm.DeleteHostInfo(h1)
assert.Nil(t, h1.prev)
assert.Nil(t, h1.next)
// Make sure we go h2 -> h3 -> h4 -> h5
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
assert.Nil(t, h5.next)
// Delete in the middle
hm.DeleteHostInfo(h3)
assert.Nil(t, h3.prev)
assert.Nil(t, h3.next)
// Make sure we go h2 -> h4 -> h5
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
assert.Nil(t, h5.next)
// Delete the tail
hm.DeleteHostInfo(h5)
assert.Nil(t, h5.prev)
assert.Nil(t, h5.next)
// Make sure we go h2 -> h4
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
assert.Nil(t, h4.next)
// Delete the head
hm.DeleteHostInfo(h2)
assert.Nil(t, h2.prev)
assert.Nil(t, h2.next)
// Make sure we only have h4
prim, _ = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Nil(t, prim.prev)
assert.Nil(t, prim.next)
assert.Nil(t, h4.next)
// Delete the only item
hm.DeleteHostInfo(h4)
assert.Nil(t, h4.prev)
assert.Nil(t, h4.next)
// Make sure we have nil
prim, _ = hm.QueryVpnIp(1)
assert.Nil(t, prim)
}

hostmap_tester.go Normal file (+24 lines)
View File

@ -0,0 +1,24 @@
//go:build e2e_testing
// +build e2e_testing
package nebula
// This file contains functions used to export information to the e2e testing framework
import "github.com/slackhq/nebula/iputil"
func (i *HostInfo) GetVpnIp() iputil.VpnIp {
return i.vpnIp
}
func (i *HostInfo) GetLocalIndex() uint32 {
return i.localIndexId
}
func (i *HostInfo) GetRemoteIndex() uint32 {
return i.remoteIndexId
}
func (i *HostInfo) GetRelayState() RelayState {
return i.relayState
}

View File

@ -1,8 +1,6 @@
package nebula package nebula
import ( import (
"sync/atomic"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/firewall"
@ -27,8 +25,9 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
if fwPacket.RemoteIP == f.myVpnIp { if fwPacket.RemoteIP == f.myVpnIp {
// Immediately forward packets from self to self. // Immediately forward packets from self to self.
// This should only happen on Darwin-based hosts, which route packets from // This should only happen on Darwin-based and FreeBSD hosts, which
// the Nebula IP to the Nebula IP through the Nebula TUN device. // route packets from the Nebula IP to the Nebula IP through the Nebula
// TUN device.
if immediatelyForwardToSelf { if immediatelyForwardToSelf {
_, err := f.readers[q].Write(packet) _, err := f.readers[q].Write(packet)
if err != nil { if err != nil {
@ -222,10 +221,10 @@ func (f *Interface) SendVia(viaIfc interface{},
) { ) {
via := viaIfc.(*HostInfo) via := viaIfc.(*HostInfo)
relay := relayIfc.(*Relay) relay := relayIfc.(*Relay)
c := atomic.AddUint64(&via.ConnectionState.atomicMessageCounter, 1) c := via.ConnectionState.messageCounter.Add(1)
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c) out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
f.connectionManager.Out(via.vpnIp) f.connectionManager.Out(via.localIndexId)
// Authenticate the header and payload, but do not encrypt for this message type. // Authenticate the header and payload, but do not encrypt for this message type.
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload. // The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
@ -298,11 +297,11 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
//TODO: enable if we do more than 1 tun queue //TODO: enable if we do more than 1 tun queue
//ci.writeLock.Lock() //ci.writeLock.Lock()
c := atomic.AddUint64(&ci.atomicMessageCounter, 1) c := ci.messageCounter.Add(1)
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p) //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c) out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
f.connectionManager.Out(hostinfo.vpnIp) f.connectionManager.Out(hostinfo.localIndexId)
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against // Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
// all our IPs and enable a faster roaming. // all our IPs and enable a faster roaming.

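Note on the Out calls above: switching the argument from hostinfo.vpnIp to hostinfo.localIndexId follows from the hostmap change; with several tunnels possible per vpn ip, liveness must be tracked per tunnel. A toy illustration of the keying only (the real connectionManager lives elsewhere in the tree):

package main

import "fmt"

// tracks outbound activity per tunnel rather than per address
type connTracker struct {
	out map[uint32]struct{} // keyed by localIndexId after this change
}

func (c *connTracker) Out(localIndex uint32) {
	c.out[localIndex] = struct{}{}
}

func main() {
	c := &connTracker{out: map[uint32]struct{}{}}
	c.Out(7) // new primary tunnel for a vpn ip
	c.Out(9) // older tunnel for the same ip, still draining
	fmt.Println(len(c.out)) // 2: each tunnel ages out independently
}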
inside_bsd.go Normal file (+6 lines)
View File

@ -0,0 +1,6 @@
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package nebula
const immediatelyForwardToSelf bool = true

View File

@ -1,3 +0,0 @@
package nebula
const immediatelyForwardToSelf bool = true

View File

@ -1,5 +1,5 @@
//go:build !darwin //go:build !darwin && !dragonfly && !freebsd && !netbsd && !openbsd
// +build !darwin // +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd
package nebula package nebula

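Note on the build tags above: the self-forwarding constant is now chosen per platform, with the new inside_bsd.go covering the BSD family and the generic file's constraint narrowed to exclude it. A sketch of the pattern as one hypothetical file (its negated sibling would carry !darwin && !dragonfly && ... and declare the constant false):

// forward_bsd.go (hypothetical file name); one build constraint per file
//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package fwd

// compiled only on the BSD family; platforms matching the negated
// expression get a sibling file that declares this constant as false
const immediatelyForwardToSelf = true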
View File

@ -67,7 +67,7 @@ type Interface struct {
routines int routines int
caPool *cert.NebulaCAPool caPool *cert.NebulaCAPool
disconnectInvalid bool disconnectInvalid bool
closed int32 closed atomic.Bool
relayManager *relayManager relayManager *relayManager
sendRecvErrorConfig sendRecvErrorConfig sendRecvErrorConfig sendRecvErrorConfig
@ -267,7 +267,7 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
for { for {
n, err := reader.Read(packet) n, err := reader.Read(packet)
if err != nil { if err != nil {
if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&f.closed) != 0 { if errors.Is(err, os.ErrClosed) && f.closed.Load() {
return return
} }
@ -413,7 +413,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
} }
func (f *Interface) Close() error { func (f *Interface) Close() error {
atomic.StoreInt32(&f.closed, 1) f.closed.Store(true)
// Release the tun device // Release the tun device
return f.inside.Close() return f.inside.Close()

View File
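Note on the closed flag above: it becomes an atomic.Bool so reader goroutines can distinguish a read error caused by our own Close from a genuine device failure. A self-contained sketch of that shutdown handshake, with the tun device elided:

package main

import (
	"errors"
	"fmt"
	"os"
	"sync/atomic"
)

type iface struct {
	closed atomic.Bool
}

// shouldExitQuietly reports whether a reader loop can stop without
// logging: the device returned ErrClosed and we initiated the close
func (f *iface) shouldExitQuietly(err error) bool {
	return errors.Is(err, os.ErrClosed) && f.closed.Load()
}

func (f *iface) Close() {
	f.closed.Store(true)
	// the real code releases the tun device here
}

func main() {
	f := &iface{}
	fmt.Println(f.shouldExitQuietly(os.ErrClosed)) // false: close not requested yet
	f.Close()
	fmt.Println(f.shouldExitQuietly(os.ErrClosed)) // true: expected shutdown error
}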

@ -9,7 +9,6 @@ import (
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"unsafe"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -49,29 +48,29 @@ type LightHouse struct {
// respond with. // respond with.
// - When we are not a lighthouse, this filters which addresses we accept // - When we are not a lighthouse, this filters which addresses we accept
// from lighthouses. // from lighthouses.
atomicRemoteAllowList *RemoteAllowList remoteAllowList atomic.Pointer[RemoteAllowList]
// filters local addresses that we advertise to lighthouses // filters local addresses that we advertise to lighthouses
atomicLocalAllowList *LocalAllowList localAllowList atomic.Pointer[LocalAllowList]
// used to trigger the HandshakeManager when we receive HostQueryReply // used to trigger the HandshakeManager when we receive HostQueryReply
handshakeTrigger chan<- iputil.VpnIp handshakeTrigger chan<- iputil.VpnIp
// atomicStaticList exists to avoid having a bool in each addrMap entry // staticList exists to avoid having a bool in each addrMap entry
// since static should be rare // since static should be rare
atomicStaticList map[iputil.VpnIp]struct{} staticList atomic.Pointer[map[iputil.VpnIp]struct{}]
atomicLighthouses map[iputil.VpnIp]struct{} lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]
atomicInterval int64 interval atomic.Int64
updateCancel context.CancelFunc updateCancel context.CancelFunc
updateParentCtx context.Context updateParentCtx context.Context
updateUdp udp.EncWriter updateUdp udp.EncWriter
nebulaPort uint32 // 32 bits because protobuf does not have a uint16 nebulaPort uint32 // 32 bits because protobuf does not have a uint16
atomicAdvertiseAddrs []netIpAndPort advertiseAddrs atomic.Pointer[[]netIpAndPort]
// IPs of relays that can be used by peers to access me // IPs of relays that can be used by peers to access me
atomicRelaysForMe []iputil.VpnIp relaysForMe atomic.Pointer[[]iputil.VpnIp]
metrics *MessageMetrics metrics *MessageMetrics
metricHolepunchTx metrics.Counter metricHolepunchTx metrics.Counter
@ -104,12 +103,14 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet,
myVpnNet: myVpnNet, myVpnNet: myVpnNet,
addrMap: make(map[iputil.VpnIp]*RemoteList), addrMap: make(map[iputil.VpnIp]*RemoteList),
nebulaPort: nebulaPort, nebulaPort: nebulaPort,
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
atomicStaticList: make(map[iputil.VpnIp]struct{}),
punchConn: pc, punchConn: pc,
punchy: p, punchy: p,
l: l, l: l,
} }
lighthouses := make(map[iputil.VpnIp]struct{})
h.lighthouses.Store(&lighthouses)
staticList := make(map[iputil.VpnIp]struct{})
h.staticList.Store(&staticList)
if c.GetBool("stats.lighthouse_metrics", false) { if c.GetBool("stats.lighthouse_metrics", false) {
h.metrics = newLighthouseMetrics() h.metrics = newLighthouseMetrics()
@ -137,31 +138,31 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet,
} }
func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} { func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)))) return *lh.staticList.Load()
} }
func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} { func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)))) return *lh.lighthouses.Load()
} }
func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList { func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
return (*RemoteAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)))) return lh.remoteAllowList.Load()
} }
func (lh *LightHouse) GetLocalAllowList() *LocalAllowList { func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
return (*LocalAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)))) return lh.localAllowList.Load()
} }
func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort { func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort {
return *(*[]netIpAndPort)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)))) return *lh.advertiseAddrs.Load()
} }
func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp { func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
return *(*[]iputil.VpnIp)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)))) return *lh.relaysForMe.Load()
} }
func (lh *LightHouse) GetUpdateInterval() int64 { func (lh *LightHouse) GetUpdateInterval() int64 {
return atomic.LoadInt64(&lh.atomicInterval) return lh.interval.Load()
} }
func (lh *LightHouse) reload(c *config.C, initial bool) error { func (lh *LightHouse) reload(c *config.C, initial bool) error {
@ -188,7 +189,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort}) advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort})
} }
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)), unsafe.Pointer(&advAddrs)) lh.advertiseAddrs.Store(&advAddrs)
if !initial { if !initial {
lh.l.Info("lighthouse.advertise_addrs has changed") lh.l.Info("lighthouse.advertise_addrs has changed")
@ -196,10 +197,10 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
} }
if initial || c.HasChanged("lighthouse.interval") { if initial || c.HasChanged("lighthouse.interval") {
atomic.StoreInt64(&lh.atomicInterval, int64(c.GetInt("lighthouse.interval", 10))) lh.interval.Store(int64(c.GetInt("lighthouse.interval", 10)))
if !initial { if !initial {
lh.l.Infof("lighthouse.interval changed to %v", lh.atomicInterval) lh.l.Infof("lighthouse.interval changed to %v", lh.interval.Load())
if lh.updateCancel != nil { if lh.updateCancel != nil {
// May not always have a running routine // May not always have a running routine
@ -216,7 +217,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err) return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
} }
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)), unsafe.Pointer(ral)) lh.remoteAllowList.Store(ral)
if !initial { if !initial {
//TODO: a diff will be annoyingly difficult //TODO: a diff will be annoyingly difficult
lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed") lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed")
@ -229,7 +230,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err) return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
} }
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)), unsafe.Pointer(lal)) lh.localAllowList.Store(lal)
if !initial { if !initial {
//TODO: a diff will be annoyingly difficult //TODO: a diff will be annoyingly difficult
lh.l.Info("lighthouse.local_allow_list has changed") lh.l.Info("lighthouse.local_allow_list has changed")
@ -244,7 +245,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
return err return err
} }
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)), unsafe.Pointer(&staticList)) lh.staticList.Store(&staticList)
if !initial { if !initial {
//TODO: we should remove any remote list entries for static hosts that were removed/modified? //TODO: we should remove any remote list entries for static hosts that were removed/modified?
lh.l.Info("static_host_map has changed") lh.l.Info("static_host_map has changed")
@ -259,7 +260,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
return err return err
} }
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)), unsafe.Pointer(&lhMap)) lh.lighthouses.Store(&lhMap)
if !initial { if !initial {
//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic //NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
lh.l.Info("lighthouse.hosts has changed") lh.l.Info("lighthouse.hosts has changed")
@ -274,7 +275,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
lh.l.Info("Ignoring relays from config because am_relay is true") lh.l.Info("Ignoring relays from config because am_relay is true")
} }
relaysForMe := []iputil.VpnIp{} relaysForMe := []iputil.VpnIp{}
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe)) lh.relaysForMe.Store(&relaysForMe)
case false: case false:
relaysForMe := []iputil.VpnIp{} relaysForMe := []iputil.VpnIp{}
for _, v := range c.GetStringSlice("relay.relays", nil) { for _, v := range c.GetStringSlice("relay.relays", nil) {
@ -285,7 +286,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP)) relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP))
} }
} }
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe)) lh.relaysForMe.Store(&relaysForMe)
} }
} }
@ -460,7 +461,7 @@ func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
// AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner // AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with // We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client // And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
//NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it // NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) { func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) {
lh.Lock() lh.Lock()
am := lh.unlockedGetRemoteList(vpnIp) am := lh.unlockedGetRemoteList(vpnIp)
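Every `unsafe.Pointer` cast pair in this file collapses into `atomic.Pointer[T]`, which gives the same lock-free hot-path reads with compile-time type safety. Note that maps and slices are stored behind a pointer (`atomic.Pointer[map[...]struct{}]`, `atomic.Pointer[[]T]`) so a reload publishes a complete replacement value in one step. A small sketch of that read-mostly reload pattern (names and types hypothetical, much simpler than the real LightHouse):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type allowList struct{ cidrs []string }

type lighthouse struct {
	// Readers on the packet path call Load(); reload swaps in a fresh value.
	remoteAllowList atomic.Pointer[allowList]
	staticList      atomic.Pointer[map[string]struct{}]
}

func newLighthouse() *lighthouse {
	lh := &lighthouse{}
	// Store valid (empty) values up front so Load never returns nil,
	// mirroring the constructor change in the hunk above.
	static := make(map[string]struct{})
	lh.staticList.Store(&static)
	lh.remoteAllowList.Store(&allowList{})
	return lh
}

// reload builds complete replacement values and publishes them atomically;
// in-flight readers keep using their old snapshot until the next Load.
func (lh *lighthouse) reload(cidrs []string, hosts []string) {
	lh.remoteAllowList.Store(&allowList{cidrs: cidrs})
	static := make(map[string]struct{}, len(hosts))
	for _, h := range hosts {
		static[h] = struct{}{}
	}
	lh.staticList.Store(&static)
}

func (lh *lighthouse) GetStaticHostList() map[string]struct{} {
	return *lh.staticList.Load()
}

func main() {
	lh := newLighthouse()
	lh.reload([]string{"10.0.0.0/8"}, []string{"192.168.100.1"})
	fmt.Println(len(lh.GetStaticHostList()), lh.remoteAllowList.Load().cidrs)
}
```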

View File

@ -202,7 +202,10 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
hostMap := NewHostMap(l, "main", tunCidr, preferredRanges) hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false) hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)
l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created") l.
WithField("network", hostMap.vpnCIDR.String()).
WithField("preferredRanges", hostMap.preferredRanges).
Info("Main HostMap created")
/* /*
config.SetDefault("promoter.interval", 10) config.SetDefault("promoter.interval", 10)

View File

@ -84,7 +84,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
signedPayload = signedPayload[header.Len:] signedPayload = signedPayload[header.Len:]
// Pull the Roaming parts up here, and return in all call paths. // Pull the Roaming parts up here, and return in all call paths.
f.handleHostRoaming(hostinfo, addr) f.handleHostRoaming(hostinfo, addr)
f.connectionManager.In(hostinfo.vpnIp) f.connectionManager.In(hostinfo.localIndexId)
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex) relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
if !ok { if !ok {
@ -93,7 +93,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
hostinfo.logger(f.l).WithField("hostinfo", hostinfo.vpnIp).WithField("remoteIndex", h.RemoteIndex).Errorf("HostInfo missing remote index") hostinfo.logger(f.l).WithField("hostinfo", hostinfo.vpnIp).WithField("remoteIndex", h.RemoteIndex).Errorf("HostInfo missing remote index")
// Delete my local index from the hostmap // Delete my local index from the hostmap
f.hostMap.DeleteRelayIdx(h.RemoteIndex) f.hostMap.DeleteRelayIdx(h.RemoteIndex)
// When the peer doesn't recieve any return traffic, its connection_manager will eventually clean up // When the peer doesn't receive any return traffic, its connection_manager will eventually clean up
// the broken relay when it cleans up the associated HostInfo object. // the broken relay when it cleans up the associated HostInfo object.
return return
} }
@ -237,17 +237,19 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by
f.handleHostRoaming(hostinfo, addr) f.handleHostRoaming(hostinfo, addr)
f.connectionManager.In(hostinfo.vpnIp) f.connectionManager.In(hostinfo.localIndexId)
} }
// closeTunnel closes a tunnel locally; it does not send a closeTunnel packet to the remote // closeTunnel closes a tunnel locally; it does not send a closeTunnel packet to the remote
func (f *Interface) closeTunnel(hostInfo *HostInfo) { func (f *Interface) closeTunnel(hostInfo *HostInfo) {
//TODO: this would be better as a single function in ConnectionManager that handled locks appropriately //TODO: this would be better as a single function in ConnectionManager that handled locks appropriately
f.connectionManager.ClearIP(hostInfo.vpnIp) f.connectionManager.ClearLocalIndex(hostInfo.localIndexId)
f.connectionManager.ClearPendingDeletion(hostInfo.vpnIp) f.connectionManager.ClearPendingDeletion(hostInfo.localIndexId)
final := f.hostMap.DeleteHostInfo(hostInfo)
if final {
// We no longer have any tunnels with this vpn ip; clear learned lighthouse state to lower memory usage
f.lightHouse.DeleteVpnIp(hostInfo.vpnIp) f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
}
f.hostMap.DeleteHostInfo(hostInfo)
} }
// sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote // sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
@ -418,7 +420,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
return return
} }
f.connectionManager.In(hostinfo.vpnIp) f.connectionManager.In(hostinfo.localIndexId)
_, err = f.readers[q].Write(out) _, err = f.readers[q].Write(out)
if err != nil { if err != nil {
f.l.WithError(err).Error("Failed to write to tun") f.l.WithError(err).Error("Failed to write to tun")
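The reworked `closeTunnel` leans on `DeleteHostInfo` now reporting whether the removed entry was the last HostInfo for that vpn IP, so learned lighthouse state is only dropped once no tunnel remains. A hedged sketch of that "cleanup on final reference" shape (hostmap internals heavily simplified, names illustrative):

```go
package main

import "fmt"

type hostInfo struct {
	vpnIp      string
	localIndex uint32
}

type hostMap struct {
	// Several HostInfos (e.g. from racing handshakes) can share one vpn IP.
	byVpnIp map[string][]*hostInfo
}

// deleteHostInfo removes one entry and reports whether it was the final
// HostInfo for that vpn IP, mirroring the bool DeleteHostInfo now returns.
func (hm *hostMap) deleteHostInfo(hi *hostInfo) (final bool) {
	list := hm.byVpnIp[hi.vpnIp]
	for i, cur := range list {
		if cur.localIndex == hi.localIndex {
			list = append(list[:i], list[i+1:]...)
			break
		}
	}
	if len(list) == 0 {
		delete(hm.byVpnIp, hi.vpnIp)
		return true
	}
	hm.byVpnIp[hi.vpnIp] = list
	return false
}

func main() {
	hm := &hostMap{byVpnIp: map[string][]*hostInfo{}}
	a := &hostInfo{vpnIp: "10.1.0.2", localIndex: 1}
	b := &hostInfo{vpnIp: "10.1.0.2", localIndex: 2}
	hm.byVpnIp["10.1.0.2"] = []*hostInfo{a, b}

	fmt.Println(hm.deleteHostInfo(a)) // false: one tunnel left, keep lighthouse state
	fmt.Println(hm.deleteHostInfo(b)) // true: last one gone, safe to clear learned state
}
```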

View File

@ -28,11 +28,13 @@ func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes
return nil, err return nil, err
} }
// XXX Android returns an fd in non-blocking mode which is necessary for shutdown to work properly.
// Be sure not to call file.Fd() as it will set the fd to blocking mode.
file := os.NewFile(uintptr(deviceFd), "/dev/net/tun") file := os.NewFile(uintptr(deviceFd), "/dev/net/tun")
return &tun{ return &tun{
ReadWriteCloser: file, ReadWriteCloser: file,
fd: int(file.Fd()), fd: deviceFd,
cidr: cidr, cidr: cidr,
l: l, l: l,
routeTree: routeTree, routeTree: routeTree,

View File

@ -51,7 +51,7 @@ func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int
// packets should exit the udp side, capture them with udpConn.Get // packets should exit the udp side, capture them with udpConn.Get
func (t *TestTun) Send(packet []byte) { func (t *TestTun) Send(packet []byte) {
if t.l.Level >= logrus.InfoLevel { if t.l.Level >= logrus.InfoLevel {
t.l.WithField("dataLen", len(packet)).Info("Tun receiving injected packet") t.l.WithField("dataLen", len(packet)).Debug("Tun receiving injected packet")
} }
t.rxPackets <- packet t.rxPackets <- packet
} }

View File

@ -9,9 +9,9 @@ import (
) )
type Punchy struct { type Punchy struct {
atomicPunch int32 punch atomic.Bool
atomicRespond int32 respond atomic.Bool
atomicDelay time.Duration delay atomic.Int64
l *logrus.Logger l *logrus.Logger
} }
@ -36,12 +36,7 @@ func (p *Punchy) reload(c *config.C, initial bool) {
yes = c.GetBool("punchy", false) yes = c.GetBool("punchy", false)
} }
if yes { p.punch.Store(yes)
atomic.StoreInt32(&p.atomicPunch, 1)
} else {
atomic.StoreInt32(&p.atomicPunch, 0)
}
} else if c.HasChanged("punchy.punch") || c.HasChanged("punchy") { } else if c.HasChanged("punchy.punch") || c.HasChanged("punchy") {
//TODO: it should be relatively easy to support this, just need to be able to cancel the goroutine and boot it up from here //TODO: it should be relatively easy to support this, just need to be able to cancel the goroutine and boot it up from here
p.l.Warn("Changing punchy.punch with reload is not supported, ignoring.") p.l.Warn("Changing punchy.punch with reload is not supported, ignoring.")
@ -56,11 +51,7 @@ func (p *Punchy) reload(c *config.C, initial bool) {
yes = c.GetBool("punch_back", false) yes = c.GetBool("punch_back", false)
} }
if yes { p.respond.Store(yes)
atomic.StoreInt32(&p.atomicRespond, 1)
} else {
atomic.StoreInt32(&p.atomicRespond, 0)
}
if !initial { if !initial {
p.l.Infof("punchy.respond changed to %v", p.GetRespond()) p.l.Infof("punchy.respond changed to %v", p.GetRespond())
@ -69,7 +60,7 @@ func (p *Punchy) reload(c *config.C, initial bool) {
//NOTE: this will not apply to any in progress operations, only the next one //NOTE: this will not apply to any in progress operations, only the next one
if initial || c.HasChanged("punchy.delay") { if initial || c.HasChanged("punchy.delay") {
atomic.StoreInt64((*int64)(&p.atomicDelay), (int64)(c.GetDuration("punchy.delay", time.Second))) p.delay.Store((int64)(c.GetDuration("punchy.delay", time.Second)))
if !initial { if !initial {
p.l.Infof("punchy.delay changed to %s", p.GetDelay()) p.l.Infof("punchy.delay changed to %s", p.GetDelay())
} }
@ -77,13 +68,13 @@ func (p *Punchy) reload(c *config.C, initial bool) {
} }
func (p *Punchy) GetPunch() bool { func (p *Punchy) GetPunch() bool {
return atomic.LoadInt32(&p.atomicPunch) == 1 return p.punch.Load()
} }
func (p *Punchy) GetRespond() bool { func (p *Punchy) GetRespond() bool {
return atomic.LoadInt32(&p.atomicRespond) == 1 return p.respond.Load()
} }
func (p *Punchy) GetDelay() time.Duration { func (p *Punchy) GetDelay() time.Duration {
return (time.Duration)(atomic.LoadInt64((*int64)(&p.atomicDelay))) return (time.Duration)(p.delay.Load())
} }
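`Punchy` follows the same recipe: the int32-as-bool fields become `atomic.Bool`, so the `if yes { Store(1) } else { Store(0) }` ladders collapse to a single `Store`, and the duration is kept as nanoseconds in an `atomic.Int64`. A compact sketch (config plumbing omitted, names hypothetical):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type punchy struct {
	punch atomic.Bool
	delay atomic.Int64 // time.Duration stored as its underlying int64 nanoseconds
}

func (p *punchy) reload(punch bool, delay time.Duration) {
	p.punch.Store(punch)        // no int32 0/1 dance needed
	p.delay.Store(int64(delay)) // Duration -> int64 is a direct conversion
}

func (p *punchy) GetPunch() bool          { return p.punch.Load() }
func (p *punchy) GetDelay() time.Duration { return time.Duration(p.delay.Load()) }

func main() {
	p := &punchy{}
	p.reload(true, time.Second)
	fmt.Println(p.GetPunch(), p.GetDelay()) // true 1s
}
```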

View File

@ -15,7 +15,7 @@ import (
type relayManager struct { type relayManager struct {
l *logrus.Logger l *logrus.Logger
hostmap *HostMap hostmap *HostMap
atomicAmRelay int32 amRelay atomic.Bool
} }
func NewRelayManager(ctx context.Context, l *logrus.Logger, hostmap *HostMap, c *config.C) *relayManager { func NewRelayManager(ctx context.Context, l *logrus.Logger, hostmap *HostMap, c *config.C) *relayManager {
@ -41,18 +41,11 @@ func (rm *relayManager) reload(c *config.C, initial bool) error {
} }
func (rm *relayManager) GetAmRelay() bool { func (rm *relayManager) GetAmRelay() bool {
return atomic.LoadInt32(&rm.atomicAmRelay) == 1 return rm.amRelay.Load()
} }
func (rm *relayManager) setAmRelay(v bool) { func (rm *relayManager) setAmRelay(v bool) {
var val int32 rm.amRelay.Store(v)
switch v {
case true:
val = 1
case false:
val = 0
}
atomic.StoreInt32(&rm.atomicAmRelay, val)
} }
// AddRelay finds an available relay index on the hostmap, and associates the relay info with it. // AddRelay finds an available relay index on the hostmap, and associates the relay info with it.
@ -68,6 +61,11 @@ func AddRelay(l *logrus.Logger, relayHostInfo *HostInfo, hm *HostMap, vpnIp iput
_, inRelays := hm.Relays[index] _, inRelays := hm.Relays[index]
if !inRelays { if !inRelays {
// Avoid standing up a relay that can't be used since only the primary hostinfo
// will be pointed to by the relay logic
//TODO: if there was an existing primary and it had relay state, should we merge?
hm.unlockedMakePrimary(relayHostInfo)
hm.Relays[index] = relayHostInfo hm.Relays[index] = relayHostInfo
newRelay := Relay{ newRelay := Relay{
Type: relayType, Type: relayType,

View File

@ -130,7 +130,7 @@ func (r *RemoteList) CopyAddrs(preferredRanges []*net.IPNet) []*udp.Addr {
// LearnRemote locks and sets the learned slot for the owner vpn ip to the provided addr // LearnRemote locks and sets the learned slot for the owner vpn ip to the provided addr
// Currently this is only needed when HostInfo.SetRemote is called as that should cover both handshaking and roaming. // Currently this is only needed when HostInfo.SetRemote is called as that should cover both handshaking and roaming.
// It will mark the deduplicated address list as dirty, so do not call it unless new information is available // It will mark the deduplicated address list as dirty, so do not call it unless new information is available
//TODO: this needs to support the allow list list // TODO: this needs to support the allow list list
func (r *RemoteList) LearnRemote(ownerVpnIp iputil.VpnIp, addr *udp.Addr) { func (r *RemoteList) LearnRemote(ownerVpnIp iputil.VpnIp, addr *udp.Addr) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()

ssh.go
View File

@ -24,6 +24,7 @@ import (
type sshListHostMapFlags struct { type sshListHostMapFlags struct {
Json bool Json bool
Pretty bool Pretty bool
ByIndex bool
} }
type sshPrintCertFlags struct { type sshPrintCertFlags struct {
@ -174,6 +175,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
s := sshListHostMapFlags{} s := sshListHostMapFlags{}
fl.BoolVar(&s.Json, "json", false, "outputs as json with more information") fl.BoolVar(&s.Json, "json", false, "outputs as json with more information")
fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json") fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json")
fl.BoolVar(&s.ByIndex, "by-index", false, "gets all hosts in the hostmap from the index table")
return fl, &s return fl, &s
}, },
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
@ -189,6 +191,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
s := sshListHostMapFlags{} s := sshListHostMapFlags{}
fl.BoolVar(&s.Json, "json", false, "outputs as json with more information") fl.BoolVar(&s.Json, "json", false, "outputs as json with more information")
fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json") fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json")
fl.BoolVar(&s.ByIndex, "by-index", false, "gets all hosts in the hostmap from the index table")
return fl, &s return fl, &s
}, },
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
@ -368,7 +371,13 @@ func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error
return nil return nil
} }
hm := listHostMap(hostMap) var hm []ControlHostInfo
if fs.ByIndex {
hm = listHostMapIndexes(hostMap)
} else {
hm = listHostMapHosts(hostMap)
}
sort.Slice(hm, func(i, j int) bool { sort.Slice(hm, func(i, j int) bool {
return bytes.Compare(hm[i].VpnIp, hm[j].VpnIp) < 0 return bytes.Compare(hm[i].VpnIp, hm[j].VpnIp) < 0
}) })
@ -805,7 +814,7 @@ func sshPrintRelays(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
case TerminalType: case TerminalType:
t = "terminal" t = "terminal"
default: default:
t = "unkown" t = "unknown"
} }
s := "" s := ""

View File

@ -40,8 +40,13 @@ func execCommand(c *Command, args []string, w StringWriter) error {
if c.Flags != nil { if c.Flags != nil {
fl, fs = c.Flags() fl, fs = c.Flags()
if fl != nil { if fl != nil {
//TODO: handle the error // SetOutput() here in case fl.Parse dumps usage.
fl.Parse(args) fl.SetOutput(w.GetWriter())
err := fl.Parse(args)
if err != nil {
// fl.Parse has dumped error information to the user via the w writer.
return err
}
args = fl.Args() args = fl.Args()
} }
} }
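The fix above routes flag-parse failures to the SSH session instead of silently ignoring them: `SetOutput` redirects the error and usage text that `Parse` prints, and the returned error stops command execution. A sketch of the behavior with the standard `flag` package (the buffer stands in for the session's `StringWriter`/`GetWriter`, which are Nebula's own interfaces):

```go
package main

import (
	"bytes"
	"flag"
	"fmt"
)

func main() {
	var buf bytes.Buffer // stand-in for the SSH session writer

	fl := flag.NewFlagSet("list-hostmap", flag.ContinueOnError)
	json := fl.Bool("json", false, "outputs as json with more information")

	fl.SetOutput(&buf) // usage/error text from Parse lands here, not on stderr

	if err := fl.Parse([]string{"-bogus"}); err != nil {
		// Parse already wrote the error and usage to buf; just stop.
		fmt.Printf("parse failed: %v\n", err)
	}
	fmt.Print(buf.String())
	_ = json
}
```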

View File

@ -18,7 +18,7 @@ import (
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
) )
// startStats initializes stats from config. On success, if any futher work // startStats initializes stats from config. On success, if any further work
// is needed to serve stats, it returns a func to handle that work. If no // is needed to serve stats, it returns a func to handle that work. If no
// work is needed, it'll return nil. On failure, it returns nil, error. // work is needed, it'll return nil. On failure, it returns nil, error.
func startStats(l *logrus.Logger, c *config.C, buildVersion string, configTest bool) (func(), error) { func startStats(l *logrus.Logger, c *config.C, buildVersion string, configTest bool) (func(), error) {

View File

@ -1,17 +1,14 @@
package nebula package nebula
import ( import (
"sync"
"time" "time"
"github.com/slackhq/nebula/firewall"
) )
// How many timer objects should be cached // How many timer objects should be cached
const timerCacheMax = 50000 const timerCacheMax = 50000
var emptyFWPacket = firewall.Packet{} type TimerWheel[T any] struct {
type TimerWheel struct {
// Current tick // Current tick
current int current int
@ -26,60 +23,73 @@ type TimerWheel struct {
wheelDuration time.Duration wheelDuration time.Duration
// The actual wheel which is just a set of singly linked lists, head/tail pointers // The actual wheel which is just a set of singly linked lists, head/tail pointers
wheel []*TimeoutList wheel []*TimeoutList[T]
// Singly linked list of items that have timed out of the wheel // Singly linked list of items that have timed out of the wheel
expired *TimeoutList expired *TimeoutList[T]
// Item cache to avoid garbage collect // Item cache to avoid garbage collect
itemCache *TimeoutItem itemCache *TimeoutItem[T]
itemsCached int itemsCached int
} }
// Represents a tick in the wheel type LockingTimerWheel[T any] struct {
type TimeoutList struct { m sync.Mutex
Head *TimeoutItem t *TimerWheel[T]
Tail *TimeoutItem
} }
// Represents an item within a tick // TimeoutList represents a tick in the wheel
type TimeoutItem struct { type TimeoutList[T any] struct {
Packet firewall.Packet Head *TimeoutItem[T]
Next *TimeoutItem Tail *TimeoutItem[T]
} }
// Builds a timer wheel and identifies the tick duration and wheel duration from the provided values // TimeoutItem represents an item within a tick
type TimeoutItem[T any] struct {
Item T
Next *TimeoutItem[T]
}
// NewTimerWheel builds a timer wheel and identifies the tick duration and wheel duration from the provided values
// Purge must be called once per entry to actually remove anything // Purge must be called once per entry to actually remove anything
func NewTimerWheel(min, max time.Duration) *TimerWheel { // The TimerWheel does not handle concurrency on its own.
// Locks around access to it must be used if multiple routines are manipulating it.
func NewTimerWheel[T any](min, max time.Duration) *TimerWheel[T] {
//TODO provide an error //TODO provide an error
//if min >= max { //if min >= max {
// return nil // return nil
//} //}
// Round down and add 1 so we can have the smallest # of ticks in the wheel and still account for a full // Round down and add 2 so we can have the smallest # of ticks in the wheel and still account for a full
// max duration // max duration, even if our current tick is at the maximum position and the next item to be added is at maximum
wLen := int((max / min) + 1) // timeout
wLen := int((max / min) + 2)
tw := TimerWheel{ tw := TimerWheel[T]{
wheelLen: wLen, wheelLen: wLen,
wheel: make([]*TimeoutList, wLen), wheel: make([]*TimeoutList[T], wLen),
tickDuration: min, tickDuration: min,
wheelDuration: max, wheelDuration: max,
expired: &TimeoutList{}, expired: &TimeoutList[T]{},
} }
for i := range tw.wheel { for i := range tw.wheel {
tw.wheel[i] = &TimeoutList{} tw.wheel[i] = &TimeoutList[T]{}
} }
return &tw return &tw
} }
// Add will add a firewall.Packet to the wheel in it's proper timeout // NewLockingTimerWheel is a version of TimerWheel that is safe for concurrent use with a small performance penalty
func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem { func NewLockingTimerWheel[T any](min, max time.Duration) *LockingTimerWheel[T] {
// Check and see if we should progress the tick return &LockingTimerWheel[T]{
tw.advance(time.Now()) t: NewTimerWheel[T](min, max),
}
}
// Add will add an item to the wheel in its proper timeout.
// Caller should Advance the wheel beforehand to ensure the proper slot is used.
func (tw *TimerWheel[T]) Add(v T, timeout time.Duration) *TimeoutItem[T] {
i := tw.findWheel(timeout) i := tw.findWheel(timeout)
// Try to fetch off the cache // Try to fetch off the cache
@ -89,11 +99,11 @@ func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem
tw.itemsCached-- tw.itemsCached--
ti.Next = nil ti.Next = nil
} else { } else {
ti = &TimeoutItem{} ti = &TimeoutItem[T]{}
} }
// Relink and return // Relink and return
ti.Packet = v ti.Item = v
if tw.wheel[i].Tail == nil { if tw.wheel[i].Tail == nil {
tw.wheel[i].Head = ti tw.wheel[i].Head = ti
tw.wheel[i].Tail = ti tw.wheel[i].Tail = ti
@ -105,9 +115,12 @@ func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem
return ti return ti
} }
func (tw *TimerWheel) Purge() (firewall.Packet, bool) { // Purge removes and returns the first available expired item from the wheel; the second return value is true.
// If no item is available then the zero value of T is returned and the second return value is false.
func (tw *TimerWheel[T]) Purge() (T, bool) {
if tw.expired.Head == nil { if tw.expired.Head == nil {
return emptyFWPacket, false var na T
return na, false
} }
ti := tw.expired.Head ti := tw.expired.Head
@ -127,11 +140,11 @@ func (tw *TimerWheel) Purge() (firewall.Packet, bool) {
tw.itemsCached++ tw.itemsCached++
} }
return ti.Packet, true return ti.Item, true
} }
// advance will move the wheel forward by proper number of ticks. The caller _should_ lock the wheel before calling this // findWheel finds the next position in the wheel for the provided timeout given the current tick
func (tw *TimerWheel) findWheel(timeout time.Duration) (i int) { func (tw *TimerWheel[T]) findWheel(timeout time.Duration) (i int) {
if timeout < tw.tickDuration { if timeout < tw.tickDuration {
// Can't track anything below the set resolution // Can't track anything below the set resolution
timeout = tw.tickDuration timeout = tw.tickDuration
@ -153,8 +166,9 @@ func (tw *TimerWheel) findWheel(timeout time.Duration) (i int) {
return tick return tick
} }
// advance will lock and move the wheel forward by proper number of ticks. // Advance will move the wheel forward by the appropriate number of ticks for the provided time; all items
func (tw *TimerWheel) advance(now time.Time) { // passed over will be moved to the expired list. Calling Purge is necessary to remove them entirely.
func (tw *TimerWheel[T]) Advance(now time.Time) {
if tw.lastTick == nil { if tw.lastTick == nil {
tw.lastTick = &now tw.lastTick = &now
} }
@ -191,3 +205,21 @@ func (tw *TimerWheel) advance(now time.Time) {
newTick := tw.lastTick.Add(tw.tickDuration * time.Duration(adv)) newTick := tw.lastTick.Add(tw.tickDuration * time.Duration(adv))
tw.lastTick = &newTick tw.lastTick = &newTick
} }
func (lw *LockingTimerWheel[T]) Add(v T, timeout time.Duration) *TimeoutItem[T] {
lw.m.Lock()
defer lw.m.Unlock()
return lw.t.Add(v, timeout)
}
func (lw *LockingTimerWheel[T]) Purge() (T, bool) {
lw.m.Lock()
defer lw.m.Unlock()
return lw.t.Purge()
}
func (lw *LockingTimerWheel[T]) Advance(now time.Time) {
lw.m.Lock()
defer lw.m.Unlock()
lw.t.Advance(now)
}
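With the wheel generic over its item type, the firewall keeps `TimerWheel[firewall.Packet]` while other callers can reuse the same code for vpn IPs, making the hand-copied `SystemTimerWheel` below redundant; concurrency moves into the explicit `LockingTimerWheel` wrapper. A usage sketch against the new API, assuming the exported names from the diff (`NewLockingTimerWheel`, `Add`, `Advance`, `Purge`) and that the package is importable as `github.com/slackhq/nebula`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/slackhq/nebula"
	"github.com/slackhq/nebula/iputil"
)

func main() {
	now := time.Now()

	// A concurrency-safe wheel over vpn IPs: 1s tick resolution, 10s horizon.
	tw := nebula.NewLockingTimerWheel[iputil.VpnIp](time.Second, 10*time.Second)

	tw.Advance(now) // the first Advance only records the starting tick
	tw.Add(iputil.VpnIp(0x0a000001), 2*time.Second)

	// The caller drives time explicitly: Advance moves expired slots onto the
	// purge list, and Purge drains them one item at a time.
	tw.Advance(now.Add(3 * time.Second))
	for {
		ip, ok := tw.Purge()
		if !ok {
			break
		}
		fmt.Println("expired:", ip) // 10.0.0.1
	}
}
```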

View File

@ -1,198 +0,0 @@
package nebula
import (
"sync"
"time"
"github.com/slackhq/nebula/iputil"
)
// How many timer objects should be cached
const systemTimerCacheMax = 50000
type SystemTimerWheel struct {
// Current tick
current int
// Cheat on finding the length of the wheel
wheelLen int
// Last time we ticked, since we are lazy ticking
lastTick *time.Time
// Durations of a tick and the entire wheel
tickDuration time.Duration
wheelDuration time.Duration
// The actual wheel which is just a set of singly linked lists, head/tail pointers
wheel []*SystemTimeoutList
// Singly linked list of items that have timed out of the wheel
expired *SystemTimeoutList
// Item cache to avoid garbage collect
itemCache *SystemTimeoutItem
itemsCached int
lock sync.Mutex
}
// Represents a tick in the wheel
type SystemTimeoutList struct {
Head *SystemTimeoutItem
Tail *SystemTimeoutItem
}
// Represents an item within a tick
type SystemTimeoutItem struct {
Item iputil.VpnIp
Next *SystemTimeoutItem
}
// Builds a timer wheel and identifies the tick duration and wheel duration from the provided values
// Purge must be called once per entry to actually remove anything
func NewSystemTimerWheel(min, max time.Duration) *SystemTimerWheel {
//TODO provide an error
//if min >= max {
// return nil
//}
// Round down and add 1 so we can have the smallest # of ticks in the wheel and still account for a full
// max duration
wLen := int((max / min) + 1)
tw := SystemTimerWheel{
wheelLen: wLen,
wheel: make([]*SystemTimeoutList, wLen),
tickDuration: min,
wheelDuration: max,
expired: &SystemTimeoutList{},
}
for i := range tw.wheel {
tw.wheel[i] = &SystemTimeoutList{}
}
return &tw
}
func (tw *SystemTimerWheel) Add(v iputil.VpnIp, timeout time.Duration) *SystemTimeoutItem {
tw.lock.Lock()
defer tw.lock.Unlock()
// Check and see if we should progress the tick
//tw.advance(time.Now())
i := tw.findWheel(timeout)
// Try to fetch off the cache
ti := tw.itemCache
if ti != nil {
tw.itemCache = ti.Next
ti.Next = nil
tw.itemsCached--
} else {
ti = &SystemTimeoutItem{}
}
// Relink and return
ti.Item = v
ti.Next = tw.wheel[i].Head
tw.wheel[i].Head = ti
if tw.wheel[i].Tail == nil {
tw.wheel[i].Tail = ti
}
return ti
}
func (tw *SystemTimerWheel) Purge() interface{} {
tw.lock.Lock()
defer tw.lock.Unlock()
if tw.expired.Head == nil {
return nil
}
ti := tw.expired.Head
tw.expired.Head = ti.Next
if tw.expired.Head == nil {
tw.expired.Tail = nil
}
p := ti.Item
// Clear out the items references
ti.Item = 0
ti.Next = nil
// Maybe cache it for later
if tw.itemsCached < systemTimerCacheMax {
ti.Next = tw.itemCache
tw.itemCache = ti
tw.itemsCached++
}
return p
}
func (tw *SystemTimerWheel) findWheel(timeout time.Duration) (i int) {
if timeout < tw.tickDuration {
// Can't track anything below the set resolution
timeout = tw.tickDuration
} else if timeout > tw.wheelDuration {
// We aren't handling timeouts greater than the wheels duration
timeout = tw.wheelDuration
}
// Find the next highest, rounding up
tick := int(((timeout - 1) / tw.tickDuration) + 1)
// Add another tick since the current tick may almost be over then map it to the wheel from our
// current position
tick += tw.current + 1
if tick >= tw.wheelLen {
tick -= tw.wheelLen
}
return tick
}
func (tw *SystemTimerWheel) advance(now time.Time) {
tw.lock.Lock()
defer tw.lock.Unlock()
if tw.lastTick == nil {
tw.lastTick = &now
}
// We want to round down
ticks := int(now.Sub(*tw.lastTick) / tw.tickDuration)
//l.Infoln("Ticks: ", ticks)
for i := 0; i < ticks; i++ {
tw.current++
//l.Infoln("Tick: ", tw.current)
if tw.current >= tw.wheelLen {
tw.current = 0
}
// We need to append the expired items as to not starve evicting the oldest ones
if tw.expired.Tail == nil {
tw.expired.Head = tw.wheel[tw.current].Head
tw.expired.Tail = tw.wheel[tw.current].Tail
} else {
tw.expired.Tail.Next = tw.wheel[tw.current].Head
if tw.wheel[tw.current].Tail != nil {
tw.expired.Tail = tw.wheel[tw.current].Tail
}
}
//l.Infoln("Head: ", tw.expired.Head, "Tail: ", tw.expired.Tail)
tw.wheel[tw.current].Head = nil
tw.wheel[tw.current].Tail = nil
tw.lastTick = &now
}
}

View File

@ -1,135 +0,0 @@
package nebula
import (
"net"
"testing"
"time"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
)
func TestNewSystemTimerWheel(t *testing.T) {
// Make sure we get an object we expect
tw := NewSystemTimerWheel(time.Second, time.Second*10)
assert.Equal(t, 11, tw.wheelLen)
assert.Equal(t, 0, tw.current)
assert.Nil(t, tw.lastTick)
assert.Equal(t, time.Second*1, tw.tickDuration)
assert.Equal(t, time.Second*10, tw.wheelDuration)
assert.Len(t, tw.wheel, 11)
// Assert the math is correct
tw = NewSystemTimerWheel(time.Second*3, time.Second*10)
assert.Equal(t, 4, tw.wheelLen)
tw = NewSystemTimerWheel(time.Second*120, time.Minute*10)
assert.Equal(t, 6, tw.wheelLen)
}
func TestSystemTimerWheel_findWheel(t *testing.T) {
tw := NewSystemTimerWheel(time.Second, time.Second*10)
assert.Len(t, tw.wheel, 11)
// Current + tick + 1 since we don't know how far into current we are
assert.Equal(t, 2, tw.findWheel(time.Second*1))
// Scale up to min duration
assert.Equal(t, 2, tw.findWheel(time.Millisecond*1))
// Make sure we hit that last index
assert.Equal(t, 0, tw.findWheel(time.Second*10))
// Scale down to max duration
assert.Equal(t, 0, tw.findWheel(time.Second*11))
tw.current = 1
// Make sure we account for the current position properly
assert.Equal(t, 3, tw.findWheel(time.Second*1))
assert.Equal(t, 1, tw.findWheel(time.Second*10))
}
func TestSystemTimerWheel_Add(t *testing.T) {
tw := NewSystemTimerWheel(time.Second, time.Second*10)
fp1 := iputil.Ip2VpnIp(net.ParseIP("1.2.3.4"))
tw.Add(fp1, time.Second*1)
// Make sure we set head and tail properly
assert.NotNil(t, tw.wheel[2])
assert.Equal(t, fp1, tw.wheel[2].Head.Item)
assert.Nil(t, tw.wheel[2].Head.Next)
assert.Equal(t, fp1, tw.wheel[2].Tail.Item)
assert.Nil(t, tw.wheel[2].Tail.Next)
// Make sure we only modify head
fp2 := iputil.Ip2VpnIp(net.ParseIP("1.2.3.4"))
tw.Add(fp2, time.Second*1)
assert.Equal(t, fp2, tw.wheel[2].Head.Item)
assert.Equal(t, fp1, tw.wheel[2].Head.Next.Item)
assert.Equal(t, fp1, tw.wheel[2].Tail.Item)
assert.Nil(t, tw.wheel[2].Tail.Next)
// Make sure we use free'd items first
tw.itemCache = &SystemTimeoutItem{}
tw.itemsCached = 1
tw.Add(fp2, time.Second*1)
assert.Nil(t, tw.itemCache)
assert.Equal(t, 0, tw.itemsCached)
}
func TestSystemTimerWheel_Purge(t *testing.T) {
// First advance should set the lastTick and do nothing else
tw := NewSystemTimerWheel(time.Second, time.Second*10)
assert.Nil(t, tw.lastTick)
tw.advance(time.Now())
assert.NotNil(t, tw.lastTick)
assert.Equal(t, 0, tw.current)
fps := []iputil.VpnIp{9, 10, 11, 12}
//fp1 := ip2int(net.ParseIP("1.2.3.4"))
tw.Add(fps[0], time.Second*1)
tw.Add(fps[1], time.Second*1)
tw.Add(fps[2], time.Second*2)
tw.Add(fps[3], time.Second*2)
ta := time.Now().Add(time.Second * 3)
lastTick := *tw.lastTick
tw.advance(ta)
assert.Equal(t, 3, tw.current)
assert.True(t, tw.lastTick.After(lastTick))
// Make sure we get all 4 packets back
for i := 0; i < 4; i++ {
assert.Contains(t, fps, tw.Purge())
}
// Make sure there aren't any leftover
assert.Nil(t, tw.Purge())
assert.Nil(t, tw.expired.Head)
assert.Nil(t, tw.expired.Tail)
// Make sure we cached the free'd items
assert.Equal(t, 4, tw.itemsCached)
ci := tw.itemCache
for i := 0; i < 4; i++ {
assert.NotNil(t, ci)
ci = ci.Next
}
assert.Nil(t, ci)
// Lets make sure we roll over properly
ta = ta.Add(time.Second * 5)
tw.advance(ta)
assert.Equal(t, 8, tw.current)
ta = ta.Add(time.Second * 2)
tw.advance(ta)
assert.Equal(t, 10, tw.current)
ta = ta.Add(time.Second * 1)
tw.advance(ta)
assert.Equal(t, 0, tw.current)
}

View File

@ -10,25 +10,37 @@ import (
func TestNewTimerWheel(t *testing.T) { func TestNewTimerWheel(t *testing.T) {
// Make sure we get an object we expect // Make sure we get an object we expect
tw := NewTimerWheel(time.Second, time.Second*10) tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10)
assert.Equal(t, 11, tw.wheelLen) assert.Equal(t, 12, tw.wheelLen)
assert.Equal(t, 0, tw.current) assert.Equal(t, 0, tw.current)
assert.Nil(t, tw.lastTick) assert.Nil(t, tw.lastTick)
assert.Equal(t, time.Second*1, tw.tickDuration) assert.Equal(t, time.Second*1, tw.tickDuration)
assert.Equal(t, time.Second*10, tw.wheelDuration) assert.Equal(t, time.Second*10, tw.wheelDuration)
assert.Len(t, tw.wheel, 11) assert.Len(t, tw.wheel, 12)
// Assert the math is correct // Assert the math is correct
tw = NewTimerWheel(time.Second*3, time.Second*10) tw = NewTimerWheel[firewall.Packet](time.Second*3, time.Second*10)
assert.Equal(t, 4, tw.wheelLen) assert.Equal(t, 5, tw.wheelLen)
tw = NewTimerWheel[firewall.Packet](time.Second*120, time.Minute*10)
assert.Equal(t, 7, tw.wheelLen)
// Test empty purge of non-nil items
i, ok := tw.Purge()
assert.Equal(t, firewall.Packet{}, i)
assert.False(t, ok)
// Test empty purges of nil items
tw2 := NewTimerWheel[*int](time.Second, time.Second*10)
i2, ok := tw2.Purge()
assert.Nil(t, i2)
assert.False(t, ok)
tw = NewTimerWheel(time.Second*120, time.Minute*10)
assert.Equal(t, 6, tw.wheelLen)
} }
func TestTimerWheel_findWheel(t *testing.T) { func TestTimerWheel_findWheel(t *testing.T) {
tw := NewTimerWheel(time.Second, time.Second*10) tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10)
assert.Len(t, tw.wheel, 11) assert.Len(t, tw.wheel, 12)
// Current + tick + 1 since we don't know how far into current we are // Current + tick + 1 since we don't know how far into current we are
assert.Equal(t, 2, tw.findWheel(time.Second*1)) assert.Equal(t, 2, tw.findWheel(time.Second*1))
@ -37,51 +49,68 @@ func TestTimerWheel_findWheel(t *testing.T) {
assert.Equal(t, 2, tw.findWheel(time.Millisecond*1)) assert.Equal(t, 2, tw.findWheel(time.Millisecond*1))
// Make sure we hit that last index // Make sure we hit that last index
assert.Equal(t, 0, tw.findWheel(time.Second*10)) assert.Equal(t, 11, tw.findWheel(time.Second*10))
// Scale down to max duration // Scale down to max duration
assert.Equal(t, 0, tw.findWheel(time.Second*11)) assert.Equal(t, 11, tw.findWheel(time.Second*11))
tw.current = 1 tw.current = 1
// Make sure we account for the current position properly // Make sure we account for the current position properly
assert.Equal(t, 3, tw.findWheel(time.Second*1)) assert.Equal(t, 3, tw.findWheel(time.Second*1))
assert.Equal(t, 1, tw.findWheel(time.Second*10)) assert.Equal(t, 0, tw.findWheel(time.Second*10))
} }
func TestTimerWheel_Add(t *testing.T) { func TestTimerWheel_Add(t *testing.T) {
tw := NewTimerWheel(time.Second, time.Second*10) tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10)
fp1 := firewall.Packet{} fp1 := firewall.Packet{}
tw.Add(fp1, time.Second*1) tw.Add(fp1, time.Second*1)
// Make sure we set head and tail properly // Make sure we set head and tail properly
assert.NotNil(t, tw.wheel[2]) assert.NotNil(t, tw.wheel[2])
assert.Equal(t, fp1, tw.wheel[2].Head.Packet) assert.Equal(t, fp1, tw.wheel[2].Head.Item)
assert.Nil(t, tw.wheel[2].Head.Next) assert.Nil(t, tw.wheel[2].Head.Next)
assert.Equal(t, fp1, tw.wheel[2].Tail.Packet) assert.Equal(t, fp1, tw.wheel[2].Tail.Item)
assert.Nil(t, tw.wheel[2].Tail.Next) assert.Nil(t, tw.wheel[2].Tail.Next)
// Make sure we only modify head // Make sure we only modify head
fp2 := firewall.Packet{} fp2 := firewall.Packet{}
tw.Add(fp2, time.Second*1) tw.Add(fp2, time.Second*1)
assert.Equal(t, fp2, tw.wheel[2].Head.Packet) assert.Equal(t, fp2, tw.wheel[2].Head.Item)
assert.Equal(t, fp1, tw.wheel[2].Head.Next.Packet) assert.Equal(t, fp1, tw.wheel[2].Head.Next.Item)
assert.Equal(t, fp1, tw.wheel[2].Tail.Packet) assert.Equal(t, fp1, tw.wheel[2].Tail.Item)
assert.Nil(t, tw.wheel[2].Tail.Next) assert.Nil(t, tw.wheel[2].Tail.Next)
// Make sure we use free'd items first // Make sure we use free'd items first
tw.itemCache = &TimeoutItem{} tw.itemCache = &TimeoutItem[firewall.Packet]{}
tw.itemsCached = 1 tw.itemsCached = 1
tw.Add(fp2, time.Second*1) tw.Add(fp2, time.Second*1)
assert.Nil(t, tw.itemCache) assert.Nil(t, tw.itemCache)
assert.Equal(t, 0, tw.itemsCached) assert.Equal(t, 0, tw.itemsCached)
// Ensure that no configuration of a wheel results in calculating an overflow of the wheel
for min := time.Duration(1); min < 100; min++ {
for max := min; max < 100; max++ {
tw = NewTimerWheel[firewall.Packet](min, max)
for current := 0; current < tw.wheelLen; current++ {
tw.current = current
for timeout := time.Duration(0); timeout <= tw.wheelDuration; timeout++ {
tick := tw.findWheel(timeout)
if tick >= tw.wheelLen {
t.Errorf("Min: %v; Max: %v; Wheel len: %v; Current Tick: %v; Insert timeout: %v; Calc tick: %v", min, max, tw.wheelLen, current, timeout, tick)
}
}
}
}
}
} }
func TestTimerWheel_Purge(t *testing.T) { func TestTimerWheel_Purge(t *testing.T) {
// First advance should set the lastTick and do nothing else // First advance should set the lastTick and do nothing else
tw := NewTimerWheel(time.Second, time.Second*10) tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10)
assert.Nil(t, tw.lastTick) assert.Nil(t, tw.lastTick)
tw.advance(time.Now()) tw.Advance(time.Now())
assert.NotNil(t, tw.lastTick) assert.NotNil(t, tw.lastTick)
assert.Equal(t, 0, tw.current) assert.Equal(t, 0, tw.current)
@ -99,7 +128,7 @@ func TestTimerWheel_Purge(t *testing.T) {
ta := time.Now().Add(time.Second * 3) ta := time.Now().Add(time.Second * 3)
lastTick := *tw.lastTick lastTick := *tw.lastTick
tw.advance(ta) tw.Advance(ta)
assert.Equal(t, 3, tw.current) assert.Equal(t, 3, tw.current)
assert.True(t, tw.lastTick.After(lastTick)) assert.True(t, tw.lastTick.After(lastTick))
@ -125,16 +154,20 @@ func TestTimerWheel_Purge(t *testing.T) {
} }
assert.Nil(t, ci) assert.Nil(t, ci)
// Lets make sure we roll over properly // Let's make sure we roll over properly
ta = ta.Add(time.Second * 5) ta = ta.Add(time.Second * 5)
tw.advance(ta) tw.Advance(ta)
assert.Equal(t, 8, tw.current) assert.Equal(t, 8, tw.current)
ta = ta.Add(time.Second * 2) ta = ta.Add(time.Second * 2)
tw.advance(ta) tw.Advance(ta)
assert.Equal(t, 10, tw.current) assert.Equal(t, 10, tw.current)
ta = ta.Add(time.Second * 1) ta = ta.Add(time.Second * 1)
tw.advance(ta) tw.Advance(ta)
assert.Equal(t, 11, tw.current)
ta = ta.Add(time.Second * 1)
tw.Advance(ta)
assert.Equal(t, 0, tw.current) assert.Equal(t, 0, tw.current)
} }
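The updated assertions follow directly from the `(max / min) + 2` sizing above. A quick worked check of the arithmetic these tests pin down, mirroring `findWheel`'s rounding (the clamping of timeouts to `[min, wheelDuration]` is omitted for brevity):

```go
package main

import (
	"fmt"
	"time"
)

// findWheel mirrors the rounding in TimerWheel.findWheel for a given tick
// duration, wheel length, and current position.
func findWheel(timeout, min time.Duration, wheelLen, current int) int {
	tick := int((timeout-1)/min) + 1 // round up to whole ticks
	tick += current + 1              // skip the (possibly partial) current tick
	if tick >= wheelLen {
		tick -= wheelLen
	}
	return tick
}

func main() {
	min, max := time.Second, 10*time.Second

	// Wheel length: round down, then +2 so a max-timeout insert still fits
	// even when the current tick sits at the last position.
	wheelLen := int(max/min) + 2
	fmt.Println(wheelLen) // 12, as the updated test asserts

	fmt.Println(findWheel(10*time.Second, min, wheelLen, 0)) // 11, the last slot
	fmt.Println(findWheel(10*time.Second, min, wheelLen, 1)) // 0, wraps around
	fmt.Println(findWheel(time.Second, min, wheelLen, 1))    // 3, current + tick + 1
}
```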

View File

@ -66,7 +66,7 @@ func (u *Conn) Send(packet *Packet) {
u.l.WithField("header", h). u.l.WithField("header", h).
WithField("udpAddr", fmt.Sprintf("%v:%v", packet.FromIp, packet.FromPort)). WithField("udpAddr", fmt.Sprintf("%v:%v", packet.FromIp, packet.FromPort)).
WithField("dataLen", len(packet.Data)). WithField("dataLen", len(packet.Data)).
Info("UDP receiving injected packet") Debug("UDP receiving injected packet")
} }
u.RxPackets <- packet u.RxPackets <- packet
} }

View File

@ -59,18 +59,14 @@ func procyield(cycles uint32)
//go:linkname nanotime runtime.nanotime //go:linkname nanotime runtime.nanotime
func nanotime() int64 func nanotime() int64
//
// CreateTUN creates a Wintun interface with the given name. Should a Wintun // CreateTUN creates a Wintun interface with the given name. Should a Wintun
// interface with the same name exist, it is reused. // interface with the same name exist, it is reused.
//
func CreateTUN(ifname string, mtu int) (Device, error) { func CreateTUN(ifname string, mtu int) (Device, error) {
return CreateTUNWithRequestedGUID(ifname, WintunStaticRequestedGUID, mtu) return CreateTUNWithRequestedGUID(ifname, WintunStaticRequestedGUID, mtu)
} }
//
// CreateTUNWithRequestedGUID creates a Wintun interface with the given name and // CreateTUNWithRequestedGUID creates a Wintun interface with the given name and
// a requested GUID. Should a Wintun interface with the same name exist, it is reused. // a requested GUID. Should a Wintun interface with the same name exist, it is reused.
//
func CreateTUNWithRequestedGUID(ifname string, requestedGUID *windows.GUID, mtu int) (Device, error) { func CreateTUNWithRequestedGUID(ifname string, requestedGUID *windows.GUID, mtu int) (Device, error) {
wt, err := wintun.CreateAdapter(ifname, WintunTunnelType, requestedGUID) wt, err := wintun.CreateAdapter(ifname, WintunTunnelType, requestedGUID)
if err != nil { if err != nil {