From 9af242dc47706c3cdd01f829e98e7c859a3704ab Mon Sep 17 00:00:00 2001 From: Wade Simmons Date: Mon, 31 Oct 2022 13:37:41 -0400 Subject: [PATCH 01/26] switch to new sync/atomic helpers in go1.19 (#728) These new helpers make the code a lot cleaner. I confirmed that the simple helpers like `atomic.Int64` don't add any extra overhead as they get inlined by the compiler. `atomic.Pointer` adds an extra method call as it no longer gets inlined, but we aren't using these on the hot path so it is probably okay. --- .github/workflows/gofmt.yml | 8 ++-- .github/workflows/release.yml | 12 +++--- .github/workflows/smoke.yml | 8 ++-- .github/workflows/test.yml | 16 ++++---- Makefile | 2 +- cmd/nebula-service/main.go | 2 +- cmd/nebula/main.go | 2 +- connection_manager_test.go | 20 ++++++++-- connection_state.go | 24 +++++------ control.go | 3 +- firewall.go | 2 +- firewall/cache.go | 6 +-- go.mod | 2 +- handshake_ix.go | 3 +- handshake_manager_test.go | 13 +----- hostmap.go | 9 ++--- inside.go | 6 +-- interface.go | 6 +-- lighthouse.go | 75 ++++++++++++++++++----------------- punchy.go | 29 +++++--------- relay_manager.go | 17 +++----- remote_list.go | 2 +- wintun/tun.go | 4 -- 23 files changed, 126 insertions(+), 145 deletions(-) diff --git a/.github/workflows/gofmt.yml b/.github/workflows/gofmt.yml index ddfca5a..a00453b 100644 --- a/.github/workflows/gofmt.yml +++ b/.github/workflows/gofmt.yml @@ -14,10 +14,10 @@ jobs: runs-on: ubuntu-latest steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Check out code into the Go module directory @@ -26,9 +26,9 @@ jobs: - uses: actions/cache@v2 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-gofmt1.18-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-gofmt1.19-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-gofmt1.18- + ${{ runner.os }}-gofmt1.19- - name: Install goimports run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7485beb..572b0ff 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,10 +10,10 @@ jobs: name: Build Linux All runs-on: ubuntu-latest steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 - name: Checkout code uses: actions/checkout@v2 @@ -34,10 +34,10 @@ jobs: name: Build Windows runs-on: windows-latest steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 - name: Checkout code uses: actions/checkout@v2 @@ -68,10 +68,10 @@ jobs: HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }} runs-on: macos-11 steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 - name: Checkout code uses: actions/checkout@v2 diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index 9920992..162d526 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -18,10 +18,10 @@ jobs: runs-on: ubuntu-latest steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Check out code into the Go module directory @@ -30,9 +30,9 @@ jobs: - uses: actions/cache@v2 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-go1.19-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-go1.18- + ${{ runner.os 
}}-go1.19- - name: build run: make bin-docker diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fb69112..69ed606 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,10 +18,10 @@ jobs: runs-on: ubuntu-latest steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Check out code into the Go module directory @@ -30,9 +30,9 @@ jobs: - uses: actions/cache@v2 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-go1.19-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-go1.18- + ${{ runner.os }}-go1.19- - name: Build run: make all @@ -57,10 +57,10 @@ jobs: os: [windows-latest, macos-11] steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Check out code into the Go module directory @@ -69,9 +69,9 @@ jobs: - uses: actions/cache@v2 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-go1.19-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-go1.18- + ${{ runner.os }}-go1.19- - name: Build nebula run: go build ./cmd/nebula diff --git a/Makefile b/Makefile index 188ffea..b31c0fc 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -GOMINVERSION = 1.18 +GOMINVERSION = 1.19 NEBULA_CMD_PATH = "./cmd/nebula" GO111MODULE = on export GO111MODULE diff --git a/cmd/nebula-service/main.go b/cmd/nebula-service/main.go index f211c97..c1de267 100644 --- a/cmd/nebula-service/main.go +++ b/cmd/nebula-service/main.go @@ -13,7 +13,7 @@ import ( // A version string that can be set with // -// -ldflags "-X main.Build=SOMEVERSION" +// -ldflags "-X main.Build=SOMEVERSION" // // at compile-time. var Build string diff --git a/cmd/nebula/main.go b/cmd/nebula/main.go index efe406b..e9b285e 100644 --- a/cmd/nebula/main.go +++ b/cmd/nebula/main.go @@ -13,7 +13,7 @@ import ( // A version string that can be set with // -// -ldflags "-X main.Build=SOMEVERSION" +// -ldflags "-X main.Build=SOMEVERSION" // // at compile-time. 
var Build string diff --git a/connection_manager_test.go b/connection_manager_test.go index bae48e5..df42800 100644 --- a/connection_manager_test.go +++ b/connection_manager_test.go @@ -18,6 +18,20 @@ import ( var vpnIp iputil.VpnIp +func newTestLighthouse() *LightHouse { + lh := &LightHouse{ + l: test.NewLogger(), + addrMap: map[iputil.VpnIp]*RemoteList{}, + } + lighthouses := map[iputil.VpnIp]struct{}{} + staticList := map[iputil.VpnIp]struct{}{} + + lh.lighthouses.Store(&lighthouses) + lh.staticList.Store(&staticList) + + return lh +} + func Test_NewConnectionManagerTest(t *testing.T) { l := test.NewLogger() //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") @@ -35,7 +49,7 @@ func Test_NewConnectionManagerTest(t *testing.T) { rawCertificateNoKey: []byte{}, } - lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})} + lh := newTestLighthouse() ifce := &Interface{ hostMap: hostMap, inside: &test.NoopTun{}, @@ -104,7 +118,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) { rawCertificateNoKey: []byte{}, } - lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})} + lh := newTestLighthouse() ifce := &Interface{ hostMap: hostMap, inside: &test.NoopTun{}, @@ -213,7 +227,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) { rawCertificateNoKey: []byte{}, } - lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})} + lh := newTestLighthouse() ifce := &Interface{ hostMap: hostMap, inside: &test.NoopTun{}, diff --git a/connection_state.go b/connection_state.go index c28cc42..6bbb02f 100644 --- a/connection_state.go +++ b/connection_state.go @@ -14,17 +14,17 @@ import ( const ReplayWindow = 1024 type ConnectionState struct { - eKey *NebulaCipherState - dKey *NebulaCipherState - H *noise.HandshakeState - certState *CertState - peerCert *cert.NebulaCertificate - initiator bool - atomicMessageCounter uint64 - window *Bits - queueLock sync.Mutex - writeLock sync.Mutex - ready bool + eKey *NebulaCipherState + dKey *NebulaCipherState + H *noise.HandshakeState + certState *CertState + peerCert *cert.NebulaCertificate + initiator bool + messageCounter atomic.Uint64 + window *Bits + queueLock sync.Mutex + writeLock sync.Mutex + ready bool } func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState { @@ -70,7 +70,7 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) { return json.Marshal(m{ "certificate": cs.peerCert, "initiator": cs.initiator, - "message_counter": atomic.LoadUint64(&cs.atomicMessageCounter), + "message_counter": cs.messageCounter.Load(), "ready": cs.ready, }) } diff --git a/control.go b/control.go index 6e7bda1..2e7ffee 100644 --- a/control.go +++ b/control.go @@ -5,7 +5,6 @@ import ( "net" "os" "os/signal" - "sync/atomic" "syscall" "github.com/sirupsen/logrus" @@ -219,7 +218,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo { } if h.ConnectionState != nil { - chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter) + chi.MessageCounter = h.ConnectionState.messageCounter.Load() } if c := h.GetCert(); c != nil { diff --git a/firewall.go b/firewall.go index dfc7fd1..99b18f8 100644 --- a/firewall.go +++ b/firewall.go @@ -879,7 +879,7 @@ func parsePort(s string) (startPort, endPort int32, err error) { return } 
-//TODO: write tests for these +// TODO: write tests for these func setTCPRTTTracking(c *conn, p []byte) { if c.Seq != 0 { return diff --git a/firewall/cache.go b/firewall/cache.go index 5560ab2..71b83f4 100644 --- a/firewall/cache.go +++ b/firewall/cache.go @@ -13,7 +13,7 @@ type ConntrackCache map[Packet]struct{} type ConntrackCacheTicker struct { cacheV uint64 - cacheTick uint64 + cacheTick atomic.Uint64 cache ConntrackCache } @@ -35,7 +35,7 @@ func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker { func (c *ConntrackCacheTicker) tick(d time.Duration) { for { time.Sleep(d) - atomic.AddUint64(&c.cacheTick, 1) + c.cacheTick.Add(1) } } @@ -45,7 +45,7 @@ func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache { if c == nil { return nil } - if tick := atomic.LoadUint64(&c.cacheTick); tick != c.cacheV { + if tick := c.cacheTick.Load(); tick != c.cacheV { c.cacheV = tick if ll := len(c.cache); ll > 0 { if l.Level == logrus.DebugLevel { diff --git a/go.mod b/go.mod index 69bb424..5e7393e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/slackhq/nebula -go 1.18 +go 1.19 require ( github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be diff --git a/handshake_ix.go b/handshake_ix.go index fd1a908..11a16a6 100644 --- a/handshake_ix.go +++ b/handshake_ix.go @@ -1,7 +1,6 @@ package nebula import ( - "sync/atomic" "time" "github.com/flynn/noise" @@ -51,7 +50,7 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) { } h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1) - atomic.AddUint64(&ci.atomicMessageCounter, 1) + ci.messageCounter.Add(1) msg, _, _, err := ci.H.WriteMessage(h, hsBytes) if err != nil { diff --git a/handshake_manager_test.go b/handshake_manager_test.go index ae8b267..5381b23 100644 --- a/handshake_manager_test.go +++ b/handshake_manager_test.go @@ -21,11 +21,7 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) { preferredRanges := []*net.IPNet{localrange} mw := &mockEncWriter{} mainHM := NewHostMap(l, "test", vpncidr, preferredRanges) - lh := &LightHouse{ - atomicStaticList: make(map[iputil.VpnIp]struct{}), - atomicLighthouses: make(map[iputil.VpnIp]struct{}), - addrMap: make(map[iputil.VpnIp]*RemoteList), - } + lh := newTestLighthouse() blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig) @@ -79,12 +75,7 @@ func Test_NewHandshakeManagerTrigger(t *testing.T) { preferredRanges := []*net.IPNet{localrange} mw := &mockEncWriter{} mainHM := NewHostMap(l, "test", vpncidr, preferredRanges) - lh := &LightHouse{ - addrMap: make(map[iputil.VpnIp]*RemoteList), - l: l, - atomicStaticList: make(map[iputil.VpnIp]struct{}), - atomicLighthouses: make(map[iputil.VpnIp]struct{}), - } + lh := newTestLighthouse() blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig) diff --git a/hostmap.go b/hostmap.go index 402c1a8..84b2041 100644 --- a/hostmap.go +++ b/hostmap.go @@ -18,7 +18,7 @@ import ( "github.com/slackhq/nebula/udp" ) -//const ProbeLen = 100 +// const ProbeLen = 100 const PromoteEvery = 1000 const ReQueryEvery = 5000 const MaxRemotes = 10 @@ -153,7 +153,7 @@ type HostInfo struct { remote *udp.Addr remotes *RemoteList - promoteCounter uint32 + promoteCounter atomic.Uint32 ConnectionState *ConnectionState handshakeStart time.Time //todo: this an entry in the handshake manager HandshakeReady bool //todo: being in the manager means you are ready @@ -284,7 +284,6 @@ 
func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) ( if h, ok := hm.Hosts[vpnIp]; !ok { hm.RUnlock() h = &HostInfo{ - promoteCounter: 0, vpnIp: vpnIp, HandshakePacket: make(map[uint8][]byte, 0), relayState: RelayState{ @@ -591,7 +590,7 @@ func (hm *HostMap) Punchy(ctx context.Context, conn *udp.Conn) { // TryPromoteBest handles re-querying lighthouses and probing for better paths // NOTE: It is an error to call this if you are a lighthouse since they should not roam clients! func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) { - c := atomic.AddUint32(&i.promoteCounter, 1) + c := i.promoteCounter.Add(1) if c%PromoteEvery == 0 { // The lock here is currently protecting i.remote access i.RLock() @@ -658,7 +657,7 @@ func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) { i.HandshakeComplete = true //TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen. // Clamping it to 2 gets us out of the woods for now - atomic.StoreUint64(&i.ConnectionState.atomicMessageCounter, 2) + i.ConnectionState.messageCounter.Store(2) if l.Level >= logrus.DebugLevel { i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore)) diff --git a/inside.go b/inside.go index 39c5a43..177bcd3 100644 --- a/inside.go +++ b/inside.go @@ -1,8 +1,6 @@ package nebula import ( - "sync/atomic" - "github.com/flynn/noise" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/firewall" @@ -222,7 +220,7 @@ func (f *Interface) SendVia(viaIfc interface{}, ) { via := viaIfc.(*HostInfo) relay := relayIfc.(*Relay) - c := atomic.AddUint64(&via.ConnectionState.atomicMessageCounter, 1) + c := via.ConnectionState.messageCounter.Add(1) out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c) f.connectionManager.Out(via.vpnIp) @@ -281,7 +279,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType //TODO: enable if we do more than 1 tun queue //ci.writeLock.Lock() - c := atomic.AddUint64(&ci.atomicMessageCounter, 1) + c := ci.messageCounter.Add(1) //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p) out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c) diff --git a/interface.go b/interface.go index a84eb7f..632e823 100644 --- a/interface.go +++ b/interface.go @@ -67,7 +67,7 @@ type Interface struct { routines int caPool *cert.NebulaCAPool disconnectInvalid bool - closed int32 + closed atomic.Bool relayManager *relayManager sendRecvErrorConfig sendRecvErrorConfig @@ -253,7 +253,7 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) { for { n, err := reader.Read(packet) if err != nil { - if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&f.closed) != 0 { + if errors.Is(err, os.ErrClosed) && f.closed.Load() { return } @@ -391,7 +391,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) { } func (f *Interface) Close() error { - atomic.StoreInt32(&f.closed, 1) + f.closed.Store(true) // Release the tun device return f.inside.Close() diff --git a/lighthouse.go b/lighthouse.go index 4987603..60e1f29 100644 --- a/lighthouse.go +++ b/lighthouse.go @@ -9,7 +9,6 @@ import ( "sync" "sync/atomic" "time" - "unsafe" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" @@ -49,29 +48,29 @@ type LightHouse struct { // respond with. 
// - When we are not a lighthouse, this filters which addresses we accept // from lighthouses. - atomicRemoteAllowList *RemoteAllowList + remoteAllowList atomic.Pointer[RemoteAllowList] // filters local addresses that we advertise to lighthouses - atomicLocalAllowList *LocalAllowList + localAllowList atomic.Pointer[LocalAllowList] // used to trigger the HandshakeManager when we receive HostQueryReply handshakeTrigger chan<- iputil.VpnIp - // atomicStaticList exists to avoid having a bool in each addrMap entry + // staticList exists to avoid having a bool in each addrMap entry // since static should be rare - atomicStaticList map[iputil.VpnIp]struct{} - atomicLighthouses map[iputil.VpnIp]struct{} + staticList atomic.Pointer[map[iputil.VpnIp]struct{}] + lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}] - atomicInterval int64 + interval atomic.Int64 updateCancel context.CancelFunc updateParentCtx context.Context updateUdp udp.EncWriter nebulaPort uint32 // 32 bits because protobuf does not have a uint16 - atomicAdvertiseAddrs []netIpAndPort + advertiseAddrs atomic.Pointer[[]netIpAndPort] // IP's of relays that can be used by peers to access me - atomicRelaysForMe []iputil.VpnIp + relaysForMe atomic.Pointer[[]iputil.VpnIp] metrics *MessageMetrics metricHolepunchTx metrics.Counter @@ -98,18 +97,20 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, ones, _ := myVpnNet.Mask.Size() h := LightHouse{ - amLighthouse: amLighthouse, - myVpnIp: iputil.Ip2VpnIp(myVpnNet.IP), - myVpnZeros: iputil.VpnIp(32 - ones), - myVpnNet: myVpnNet, - addrMap: make(map[iputil.VpnIp]*RemoteList), - nebulaPort: nebulaPort, - atomicLighthouses: make(map[iputil.VpnIp]struct{}), - atomicStaticList: make(map[iputil.VpnIp]struct{}), - punchConn: pc, - punchy: p, - l: l, + amLighthouse: amLighthouse, + myVpnIp: iputil.Ip2VpnIp(myVpnNet.IP), + myVpnZeros: iputil.VpnIp(32 - ones), + myVpnNet: myVpnNet, + addrMap: make(map[iputil.VpnIp]*RemoteList), + nebulaPort: nebulaPort, + punchConn: pc, + punchy: p, + l: l, } + lighthouses := make(map[iputil.VpnIp]struct{}) + h.lighthouses.Store(&lighthouses) + staticList := make(map[iputil.VpnIp]struct{}) + h.staticList.Store(&staticList) if c.GetBool("stats.lighthouse_metrics", false) { h.metrics = newLighthouseMetrics() @@ -137,31 +138,31 @@ func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, } func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} { - return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)))) + return *lh.staticList.Load() } func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} { - return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)))) + return *lh.lighthouses.Load() } func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList { - return (*RemoteAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)))) + return lh.remoteAllowList.Load() } func (lh *LightHouse) GetLocalAllowList() *LocalAllowList { - return (*LocalAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)))) + return lh.localAllowList.Load() } func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort { - return *(*[]netIpAndPort)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)))) + return *lh.advertiseAddrs.Load() } func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp { - return 
*(*[]iputil.VpnIp)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)))) + return *lh.relaysForMe.Load() } func (lh *LightHouse) GetUpdateInterval() int64 { - return atomic.LoadInt64(&lh.atomicInterval) + return lh.interval.Load() } func (lh *LightHouse) reload(c *config.C, initial bool) error { @@ -188,7 +189,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort}) } - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)), unsafe.Pointer(&advAddrs)) + lh.advertiseAddrs.Store(&advAddrs) if !initial { lh.l.Info("lighthouse.advertise_addrs has changed") @@ -196,10 +197,10 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { } if initial || c.HasChanged("lighthouse.interval") { - atomic.StoreInt64(&lh.atomicInterval, int64(c.GetInt("lighthouse.interval", 10))) + lh.interval.Store(int64(c.GetInt("lighthouse.interval", 10))) if !initial { - lh.l.Infof("lighthouse.interval changed to %v", lh.atomicInterval) + lh.l.Infof("lighthouse.interval changed to %v", lh.interval.Load()) if lh.updateCancel != nil { // May not always have a running routine @@ -216,7 +217,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err) } - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)), unsafe.Pointer(ral)) + lh.remoteAllowList.Store(ral) if !initial { //TODO: a diff will be annoyingly difficult lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed") @@ -229,7 +230,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err) } - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)), unsafe.Pointer(lal)) + lh.localAllowList.Store(lal) if !initial { //TODO: a diff will be annoyingly difficult lh.l.Info("lighthouse.local_allow_list has changed") @@ -244,7 +245,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { return err } - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)), unsafe.Pointer(&staticList)) + lh.staticList.Store(&staticList) if !initial { //TODO: we should remove any remote list entries for static hosts that were removed/modified? 
lh.l.Info("static_host_map has changed") @@ -259,7 +260,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { return err } - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)), unsafe.Pointer(&lhMap)) + lh.lighthouses.Store(&lhMap) if !initial { //NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic lh.l.Info("lighthouse.hosts has changed") @@ -274,7 +275,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { lh.l.Info("Ignoring relays from config because am_relay is true") } relaysForMe := []iputil.VpnIp{} - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe)) + lh.relaysForMe.Store(&relaysForMe) case false: relaysForMe := []iputil.VpnIp{} for _, v := range c.GetStringSlice("relay.relays", nil) { @@ -285,7 +286,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error { relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP)) } } - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe)) + lh.relaysForMe.Store(&relaysForMe) } } @@ -460,7 +461,7 @@ func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) { // AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner // We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with // And we don't want a lighthouse query reply to interfere with our learned cache if we are a client -//NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it +// NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) { lh.Lock() am := lh.unlockedGetRemoteList(vpnIp) diff --git a/punchy.go b/punchy.go index d81ed83..1ecf7c5 100644 --- a/punchy.go +++ b/punchy.go @@ -9,10 +9,10 @@ import ( ) type Punchy struct { - atomicPunch int32 - atomicRespond int32 - atomicDelay time.Duration - l *logrus.Logger + punch atomic.Bool + respond atomic.Bool + delay atomic.Int64 + l *logrus.Logger } func NewPunchyFromConfig(l *logrus.Logger, c *config.C) *Punchy { @@ -36,12 +36,7 @@ func (p *Punchy) reload(c *config.C, initial bool) { yes = c.GetBool("punchy", false) } - if yes { - atomic.StoreInt32(&p.atomicPunch, 1) - } else { - atomic.StoreInt32(&p.atomicPunch, 0) - } - + p.punch.Store(yes) } else if c.HasChanged("punchy.punch") || c.HasChanged("punchy") { //TODO: it should be relatively easy to support this, just need to be able to cancel the goroutine and boot it up from here p.l.Warn("Changing punchy.punch with reload is not supported, ignoring.") @@ -56,11 +51,7 @@ func (p *Punchy) reload(c *config.C, initial bool) { yes = c.GetBool("punch_back", false) } - if yes { - atomic.StoreInt32(&p.atomicRespond, 1) - } else { - atomic.StoreInt32(&p.atomicRespond, 0) - } + p.respond.Store(yes) if !initial { p.l.Infof("punchy.respond changed to %v", p.GetRespond()) @@ -69,7 +60,7 @@ func (p *Punchy) reload(c *config.C, initial bool) { //NOTE: this will not apply to any in progress operations, only the next one if initial || c.HasChanged("punchy.delay") { - atomic.StoreInt64((*int64)(&p.atomicDelay), (int64)(c.GetDuration("punchy.delay", time.Second))) + p.delay.Store((int64)(c.GetDuration("punchy.delay", time.Second))) if !initial { p.l.Infof("punchy.delay 
changed to %s", p.GetDelay()) } @@ -77,13 +68,13 @@ func (p *Punchy) reload(c *config.C, initial bool) { } func (p *Punchy) GetPunch() bool { - return atomic.LoadInt32(&p.atomicPunch) == 1 + return p.punch.Load() } func (p *Punchy) GetRespond() bool { - return atomic.LoadInt32(&p.atomicRespond) == 1 + return p.respond.Load() } func (p *Punchy) GetDelay() time.Duration { - return (time.Duration)(atomic.LoadInt64((*int64)(&p.atomicDelay))) + return (time.Duration)(p.delay.Load()) } diff --git a/relay_manager.go b/relay_manager.go index 145e319..95807bd 100644 --- a/relay_manager.go +++ b/relay_manager.go @@ -13,9 +13,9 @@ import ( ) type relayManager struct { - l *logrus.Logger - hostmap *HostMap - atomicAmRelay int32 + l *logrus.Logger + hostmap *HostMap + amRelay atomic.Bool } func NewRelayManager(ctx context.Context, l *logrus.Logger, hostmap *HostMap, c *config.C) *relayManager { @@ -41,18 +41,11 @@ func (rm *relayManager) reload(c *config.C, initial bool) error { } func (rm *relayManager) GetAmRelay() bool { - return atomic.LoadInt32(&rm.atomicAmRelay) == 1 + return rm.amRelay.Load() } func (rm *relayManager) setAmRelay(v bool) { - var val int32 - switch v { - case true: - val = 1 - case false: - val = 0 - } - atomic.StoreInt32(&rm.atomicAmRelay, val) + rm.amRelay.Store(v) } // AddRelay finds an available relay index on the hostmap, and associates the relay info with it. diff --git a/remote_list.go b/remote_list.go index af66891..4b544f6 100644 --- a/remote_list.go +++ b/remote_list.go @@ -130,7 +130,7 @@ func (r *RemoteList) CopyAddrs(preferredRanges []*net.IPNet) []*udp.Addr { // LearnRemote locks and sets the learned slot for the owner vpn ip to the provided addr // Currently this is only needed when HostInfo.SetRemote is called as that should cover both handshaking and roaming. // It will mark the deduplicated address list as dirty, so do not call it unless new information is available -//TODO: this needs to support the allow list list +// TODO: this needs to support the allow list list func (r *RemoteList) LearnRemote(ownerVpnIp iputil.VpnIp, addr *udp.Addr) { r.Lock() defer r.Unlock() diff --git a/wintun/tun.go b/wintun/tun.go index a2dfe5e..c167e70 100644 --- a/wintun/tun.go +++ b/wintun/tun.go @@ -59,18 +59,14 @@ func procyield(cycles uint32) //go:linkname nanotime runtime.nanotime func nanotime() int64 -// // CreateTUN creates a Wintun interface with the given name. Should a Wintun // interface with the same name exist, it is reused. -// func CreateTUN(ifname string, mtu int) (Device, error) { return CreateTUNWithRequestedGUID(ifname, WintunStaticRequestedGUID, mtu) } -// // CreateTUNWithRequestedGUID creates a Wintun interface with the given name and // a requested GUID. Should a Wintun interface with the same name exist, it is reused. 
-// func CreateTUNWithRequestedGUID(ifname string, requestedGUID *windows.GUID, mtu int) (Device, error) { wt, err := wintun.CreateAdapter(ifname, WintunTunnelType, requestedGUID) if err != nil { From 85f5849d0b7be6de4a3d6f82209fa29f1e0b7a9c Mon Sep 17 00:00:00 2001 From: John Maguire Date: Fri, 11 Nov 2022 09:18:43 -0700 Subject: [PATCH 02/26] Fix a hang when shutting down Android (#772) --- overlay/tun_android.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/overlay/tun_android.go b/overlay/tun_android.go index 3bea907..321aec8 100644 --- a/overlay/tun_android.go +++ b/overlay/tun_android.go @@ -28,11 +28,13 @@ func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes return nil, err } + // XXX Android returns an fd in non-blocking mode which is necessary for shutdown to work properly. + // Be sure not to call file.Fd() as it will set the fd to blocking mode. file := os.NewFile(uintptr(deviceFd), "/dev/net/tun") return &tun{ ReadWriteCloser: file, - fd: int(file.Fd()), + fd: deviceFd, cidr: cidr, l: l, routeTree: routeTree, From 813b64ffb179e7bd5b9c5c02adc5d286e5bd8f5e Mon Sep 17 00:00:00 2001 From: brad-defined <77982333+brad-defined@users.noreply.github.com> Date: Tue, 15 Nov 2022 21:33:09 -0500 Subject: [PATCH 03/26] Remove unused variables from connection manager (#677) --- connection_manager.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/connection_manager.go b/connection_manager.go index 79471b8..a80a6c3 100644 --- a/connection_manager.go +++ b/connection_manager.go @@ -17,10 +17,8 @@ type connectionManager struct { hostMap *HostMap in map[iputil.VpnIp]struct{} inLock *sync.RWMutex - inCount int out map[iputil.VpnIp]struct{} outLock *sync.RWMutex - outCount int TrafficTimer *SystemTimerWheel intf *Interface @@ -40,10 +38,8 @@ func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface hostMap: intf.hostMap, in: make(map[iputil.VpnIp]struct{}), inLock: &sync.RWMutex{}, - inCount: 0, out: make(map[iputil.VpnIp]struct{}), outLock: &sync.RWMutex{}, - outCount: 0, TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), intf: intf, pendingDeletion: make(map[iputil.VpnIp]int), From 9a8892c52603f02f28406f57660ba2f7a9af4759 Mon Sep 17 00:00:00 2001 From: brad-defined <77982333+brad-defined@users.noreply.github.com> Date: Tue, 22 Nov 2022 21:55:27 -0500 Subject: [PATCH 04/26] Fix 756 SSH command line parsing error to write to user instead of stderr (#757) --- sshd/command.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/sshd/command.go b/sshd/command.go index 8296ef4..900b01e 100644 --- a/sshd/command.go +++ b/sshd/command.go @@ -40,8 +40,13 @@ func execCommand(c *Command, args []string, w StringWriter) error { if c.Flags != nil { fl, fs = c.Flags() if fl != nil { - //TODO: handle the error - fl.Parse(args) + // SetOutput() here in case fl.Parse dumps usage. + fl.SetOutput(w.GetWriter()) + err := fl.Parse(args) + if err != nil { + // fl.Parse has dumped error information to the user via the w writer. + return err + } args = fl.Args() } } From d4f9500ca5354fa06f1a307eb04ff047ed7446b5 Mon Sep 17 00:00:00 2001 From: Wade Simmons Date: Wed, 23 Nov 2022 10:46:41 -0500 Subject: [PATCH 05/26] Update dependencies (2022-11) (#780) * update dependencies Update to latest dependencies on Nov 21, 2022. 
Here are the diffs for deps that actually end up in the binaries (based on `go version -m`) Updated github.com/imdario/mergo https://github.com/imdario/mergo/compare/v0.3.12...v0.3.13 Updated github.com/matttproud/golang_protobuf_extensions https://github.com/matttproud/golang_protobuf_extensions/compare/v1.0.1...v1.0.4 Updated github.com/miekg/dns https://github.com/miekg/dns/compare/v1.1.48...v1.1.50 Updated github.com/prometheus/client_golang https://github.com/prometheus/client_golang/compare/v1.12.1...v1.14.0 Updated github.com/prometheus/client_model https://github.com/prometheus/client_model/compare/v0.2.0...v0.3.0 Updated github.com/prometheus/common https://github.com/prometheus/common/compare/v0.33.0...v0.37.0 Updated github.com/prometheus/procfs https://github.com/prometheus/procfs/compare/v0.7.3...v0.8.0 Updated github.com/sirupsen/logrus https://github.com/sirupsen/logrus/compare/v1.8.1...v1.9.0 Updated github.com/vishvananda/netns https://github.com/vishvananda/netns/compare/50045581ed74...v0.0.1 Updated golang.org/x/crypto https://github.com/golang/crypto/compare/ae2d96664a29...v0.3.0 Updated golang.org/x/net https://github.com/golang/net/compare/749bd193bc2b...v0.2.0 Updated golang.org/x/sys https://github.com/golang/sys/compare/289d7a0edf71...v0.2.0 Updated golang.org/x/term https://github.com/golang/term/compare/03fcf44c2211...v0.2.0 Updated google.golang.org/protobuf v1.28.0...v1.28.1 * test that mergo merges like we expect --- config/config_test.go | 77 ++++++++++++++++++++++++++++++++++ go.mod | 39 +++++++++-------- go.sum | 97 ++++++++++++++++++++----------------------- 3 files changed, 141 insertions(+), 72 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 8dfcbb8..52bf2e4 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -7,8 +7,11 @@ import ( "testing" "time" + "github.com/imdario/mergo" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" ) func TestConfig_Load(t *testing.T) { @@ -147,3 +150,77 @@ func TestConfig_ReloadConfig(t *testing.T) { } } + +// Ensure mergo merges are done the way we expect. 
+// This is needed to test for potential regressions, like: +// - https://github.com/imdario/mergo/issues/187 +func TestConfig_MergoMerge(t *testing.T) { + configs := [][]byte{ + []byte(` +listen: + port: 1234 +`), + []byte(` +firewall: + inbound: + - port: 443 + proto: tcp + groups: + - server + - port: 443 + proto: tcp + groups: + - webapp +`), + []byte(` +listen: + host: 0.0.0.0 + port: 4242 +firewall: + outbound: + - port: any + proto: any + host: any + inbound: + - port: any + proto: icmp + host: any +`), + } + + var m map[any]any + + // merge the same way config.parse() merges + for _, b := range configs { + var nm map[any]any + err := yaml.Unmarshal(b, &nm) + require.NoError(t, err) + + // We need to use WithAppendSlice so that firewall rules in separate + // files are appended together + err = mergo.Merge(&nm, m, mergo.WithAppendSlice) + m = nm + require.NoError(t, err) + } + + t.Logf("Merged Config: %#v", m) + mYaml, err := yaml.Marshal(m) + require.NoError(t, err) + t.Logf("Merged Config as YAML:\n%s", mYaml) + + // If a bug is present, some items might be replaced instead of merged like we expect + expected := map[any]any{ + "firewall": map[any]any{ + "inbound": []any{ + map[any]any{"host": "any", "port": "any", "proto": "icmp"}, + map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"}, + map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}}, + "outbound": []any{ + map[any]any{"host": "any", "port": "any", "proto": "any"}}}, + "listen": map[any]any{ + "host": "0.0.0.0", + "port": 4242, + }, + } + assert.Equal(t, expected, m) +} diff --git a/go.mod b/go.mod index 5e7393e..8e8a354 100644 --- a/go.mod +++ b/go.mod @@ -9,23 +9,23 @@ require ( github.com/flynn/noise v1.0.0 github.com/gogo/protobuf v1.3.2 github.com/google/gopacket v1.1.19 - github.com/imdario/mergo v0.3.8 - github.com/kardianos/service v1.2.1 - github.com/miekg/dns v1.1.48 + github.com/imdario/mergo v0.3.13 + github.com/kardianos/service v1.2.2 + github.com/miekg/dns v1.1.50 github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f - github.com/prometheus/client_golang v1.12.1 + github.com/prometheus/client_golang v1.14.0 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 - github.com/sirupsen/logrus v1.8.1 + github.com/sirupsen/logrus v1.9.0 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.8.1 github.com/vishvananda/netlink v1.1.0 - golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 - golang.org/x/net v0.0.0-20220403103023-749bd193bc2b - golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 + golang.org/x/crypto v0.3.0 + golang.org/x/net v0.2.0 + golang.org/x/sys v0.2.0 golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 golang.zx2c4.com/wireguard/windows v0.5.3 - google.golang.org/protobuf v1.28.0 + google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.4.0 ) @@ -34,15 +34,14 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.33.0 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/vishvananda/netns 
v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/tools v0.1.10 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/vishvananda/netns v0.0.1 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/term v0.2.0 // indirect + golang.org/x/tools v0.3.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index d5fc44e..3c5eaa7 100644 --- a/go.sum +++ b/go.sum @@ -119,8 +119,8 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= @@ -139,8 +139,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -150,8 +150,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk= -github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= +github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60= +github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -163,12 +163,11 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ= -github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.48 h1:Ucfr7IIVyMBz4lRE8qmGUuZ4Wt3/ZGu9hmcMT3Uu4tQ= -github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -187,57 +186,62 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod 
h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= -github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.1 h1:JDkWS7Axy5ziNM3svylLhpSgqjPDb+BgVUbXoDo+iPw= +github.com/vishvananda/netns v0.0.1/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -250,10 +254,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o= -golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -285,8 +287,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -320,14 +322,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0= -golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -345,8 +343,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -361,7 +359,6 @@ 
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -383,7 +380,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -392,16 +388,15 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 h1:PRD0hj6tTuUnCFD08vkvjkYFbQg/9lV8KIxe1y4/cvU= -golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= +golang.org/x/term v0.2.0/go.mod 
h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -409,7 +404,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -456,13 +450,11 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY= golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= @@ -543,8 +535,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -560,8 +552,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 022ae83a4a3295eb7a178007b88cec6525633d3d Mon Sep 17 00:00:00 2001 From: Alexander Averyanov Date: Mon, 28 Nov 2022 21:59:57 +0300 Subject: [PATCH 06/26] Fix typo: my -> may (#758) --- examples/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/config.yml b/examples/config.yml index 0e3d3e1..bea132a 100644 --- a/examples/config.yml +++ b/examples/config.yml @@ -58,7 +58,7 @@ lighthouse: #"10.0.0.0/8": false #"10.42.42.0/24": true - # EXPERIMENTAL: This option my change or disappear in the future. + # EXPERIMENTAL: This option may change or disappear in the future. # Optionally allows the definition of remote_allow_list blocks # specific to an inside VPN IP CIDR. #remote_allow_ranges: From 77769de1e61786d0c6c71ec7b178c2fd32324bb9 Mon Sep 17 00:00:00 2001 From: Ian VanSchooten Date: Tue, 29 Nov 2022 11:32:43 -0500 Subject: [PATCH 07/26] Docs: Update doc links (#751) * Update documentation links * Update links --- .github/ISSUE_TEMPLATE/config.yml | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index b0440da..84a191c 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,7 +1,7 @@ blank_issues_enabled: true contact_links: - name: 📘 Documentation - url: https://www.defined.net/nebula/ + url: https://nebula.defined.net/docs/ about: Review documentation. - name: 💁 Support/Chat diff --git a/README.md b/README.md index 78861ee..cc79f6d 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ and tunneling, and each of those individual pieces existed before Nebula in vari What makes Nebula different to existing offerings is that it brings all of these ideas together, resulting in a sum that is greater than its individual parts. -Further documentation can be found [here](https://www.defined.net/nebula/). +Further documentation can be found [here](https://nebula.defined.net/docs/). You can read more about Nebula [here](https://medium.com/p/884110a5579). 
From ec48298fe8313573f71e60330bb5993176619961 Mon Sep 17 00:00:00 2001 From: John Maguire Date: Wed, 7 Dec 2022 12:38:56 -0500 Subject: [PATCH 08/26] Update config to show aes cipher instead of chacha (#788) --- examples/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/config.yml b/examples/config.yml index bea132a..73c07bf 100644 --- a/examples/config.yml +++ b/examples/config.yml @@ -133,7 +133,7 @@ punchy: # Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously! -#cipher: chachapoly +#cipher: aes # Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest # path to a network adjacent nebula node. From 12dbbd3dd34e7ecc026fdc0a37e565ad37dcc835 Mon Sep 17 00:00:00 2001 From: Caleb Jasik Date: Mon, 19 Dec 2022 11:28:27 -0600 Subject: [PATCH 09/26] Fix typos found by https://github.com/crate-ci/typos (#735) --- control.go | 2 +- handshake_manager.go | 2 +- outside.go | 2 +- ssh.go | 2 +- stats.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/control.go b/control.go index 2e7ffee..adc2a48 100644 --- a/control.go +++ b/control.go @@ -61,7 +61,7 @@ func (c *Control) Start() { // Stop signals nebula to shutdown, returns after the shutdown is complete func (c *Control) Stop() { - // Stop the handshakeManager (and other serivces), to prevent new tunnels from + // Stop the handshakeManager (and other services), to prevent new tunnels from // being created while we're shutting them all down. c.cancel() diff --git a/handshake_manager.go b/handshake_manager.go index cda1509..4cb9c39 100644 --- a/handshake_manager.go +++ b/handshake_manager.go @@ -147,7 +147,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l // Get a remotes object if we don't already have one. // This is mainly to protect us as this should never be the case - // NB ^ This comment doesn't jive. It's how the thing gets intiailized. + // NB ^ This comment doesn't jive. It's how the thing gets initialized. // It's the common path. Should it update every time, in case a future LH query/queries give us more info? if hostinfo.remotes == nil { hostinfo.remotes = c.lightHouse.QueryCache(vpnIp) diff --git a/outside.go b/outside.go index 1a1ad6f..f64815a 100644 --- a/outside.go +++ b/outside.go @@ -93,7 +93,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by hostinfo.logger(f.l).WithField("hostinfo", hostinfo.vpnIp).WithField("remoteIndex", h.RemoteIndex).Errorf("HostInfo missing remote index") // Delete my local index from the hostmap f.hostMap.DeleteRelayIdx(h.RemoteIndex) - // When the peer doesn't recieve any return traffic, its connection_manager will eventually clean up + // When the peer doesn't receive any return traffic, its connection_manager will eventually clean up // the broken relay when it cleans up the associated HostInfo object. 
return } diff --git a/ssh.go b/ssh.go index 376dd82..f8050ff 100644 --- a/ssh.go +++ b/ssh.go @@ -805,7 +805,7 @@ func sshPrintRelays(ifce *Interface, fs interface{}, a []string, w sshd.StringWr case TerminalType: t = "terminal" default: - t = "unkown" + t = "unknown" } s := "" diff --git a/stats.go b/stats.go index 3993455..03b4d81 100644 --- a/stats.go +++ b/stats.go @@ -18,7 +18,7 @@ import ( "github.com/slackhq/nebula/config" ) -// startStats initializes stats from config. On success, if any futher work +// startStats initializes stats from config. On success, if any further work // is needed to serve stats, it returns a func to handle that work. If no // work is needed, it'll return nil. On failure, it returns nil, error. func startStats(l *logrus.Logger, c *config.C, buildVersion string, configTest bool) (func(), error) { From ad7222509dc37e6256c9acd6065ca650d275de2a Mon Sep 17 00:00:00 2001 From: John Maguire Date: Mon, 19 Dec 2022 12:28:49 -0500 Subject: [PATCH 10/26] Add a link to mobile nebula in the new issue form (#790) --- .github/ISSUE_TEMPLATE/config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 84a191c..94e2c6b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -7,3 +7,7 @@ contact_links: - name: 💁 Support/Chat url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU about: 'This issue tracker is not for support questions. Join us on Slack for assistance!' + + - name: 📱 Mobile Nebula + url: https://github.com/definednet/mobile_nebula + about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!' From a3e6edf9c783fdfbe24c294a49718b0b3b07a2ce Mon Sep 17 00:00:00 2001 From: John Maguire Date: Mon, 19 Dec 2022 12:45:15 -0500 Subject: [PATCH 11/26] Use config.yml consistently (not config.yaml) (#789) --- README.md | 4 ++-- cmd/nebula-service/service.go | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cc79f6d..0b110a7 100644 --- a/README.md +++ b/README.md @@ -93,13 +93,13 @@ Download a copy of the nebula [example configuration](https://github.com/slackhq #### 6. Copy nebula credentials, configuration, and binaries to each host -For each host, copy the nebula binary to the host, along with `config.yaml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4. +For each host, copy the nebula binary to the host, along with `config.yml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4. **DO NOT COPY `ca.key` TO INDIVIDUAL NODES.** #### 7. 
Run nebula on each host ``` -./nebula -config /path/to/config.yaml +./nebula -config /path/to/config.yml ``` ## Building Nebula from source diff --git a/cmd/nebula-service/service.go b/cmd/nebula-service/service.go index 591e8e7..a54fb0f 100644 --- a/cmd/nebula-service/service.go +++ b/cmd/nebula-service/service.go @@ -49,6 +49,14 @@ func (p *program) Stop(s service.Service) error { return nil } +func fileExists(filename string) bool { + _, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return true +} + func doService(configPath *string, configTest *bool, build string, serviceFlag *string) { if *configPath == "" { ex, err := os.Executable() @@ -56,6 +64,9 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag * panic(err) } *configPath = filepath.Dir(ex) + "/config.yaml" + if !fileExists(*configPath) { + *configPath = filepath.Dir(ex) + "/config.yml" + } } svcConfig := &service.Config{ From cb2ec861eaba4cf583fe3274cffdd955a772837c Mon Sep 17 00:00:00 2001 From: Fabio Alessandro Locati <77888+Fale@users.noreply.github.com> Date: Mon, 19 Dec 2022 20:40:53 +0100 Subject: [PATCH 12/26] Nebula is now in Fedora official repositories (#719) --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 0b110a7..ca54606 100644 --- a/README.md +++ b/README.md @@ -31,9 +31,8 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for ``` $ sudo pacman -S nebula ``` -- [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/) +- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula) ``` - $ sudo dnf copr enable jdoss/nebula $ sudo dnf install nebula ``` From 3ae242fa5f1858e5b229683ce021e8e6702cc8e7 Mon Sep 17 00:00:00 2001 From: Fabio Alessandro Locati <77888+Fale@users.noreply.github.com> Date: Mon, 19 Dec 2022 20:42:07 +0100 Subject: [PATCH 13/26] Add nss-lookup to the systemd wants (#791) * Add nss-lookup to the systemd wants to ensure DNS is running before starting nebula * Add Ansible & example service scripts * Fix #797 * Align Ansible scripts and examples Co-authored-by: John Maguire --- dist/arch/nebula.service | 4 ++-- dist/fedora/nebula.service | 5 ++--- .../ansible/roles/nebula/files/systemd.nebula.service | 7 ++++--- examples/service_scripts/nebula.service | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/dist/arch/nebula.service b/dist/arch/nebula.service index d8703c8..7e5335a 100644 --- a/dist/arch/nebula.service +++ b/dist/arch/nebula.service @@ -1,6 +1,6 @@ [Unit] -Description=nebula -Wants=basic.target network-online.target +Description=Nebula overlay networking tool +Wants=basic.target network-online.target nss-lookup.target time-sync.target After=basic.target network.target network-online.target [Service] diff --git a/dist/fedora/nebula.service b/dist/fedora/nebula.service index df4baf6..21a99c5 100644 --- a/dist/fedora/nebula.service +++ b/dist/fedora/nebula.service @@ -1,15 +1,14 @@ [Unit] Description=Nebula overlay networking tool - +Wants=basic.target network-online.target nss-lookup.target time-sync.target After=basic.target network.target network-online.target Before=sshd.service -Wants=basic.target network-online.target [Service] +SyslogIdentifier=nebula ExecReload=/bin/kill -HUP $MAINPID ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml Restart=always -SyslogIdentifier=nebula [Install] WantedBy=multi-user.target diff --git a/examples/quickstart-vagrant/ansible/roles/nebula/files/systemd.nebula.service 
b/examples/quickstart-vagrant/ansible/roles/nebula/files/systemd.nebula.service index c22b4a7..fd7a067 100644 --- a/examples/quickstart-vagrant/ansible/roles/nebula/files/systemd.nebula.service +++ b/examples/quickstart-vagrant/ansible/roles/nebula/files/systemd.nebula.service @@ -1,7 +1,8 @@ [Unit] -Description=nebula -Wants=basic.target -After=basic.target network.target +Description=Nebula overlay networking tool +Wants=basic.target network-online.target nss-lookup.target time-sync.target +After=basic.target network.target network-online.target +Before=sshd.service [Service] SyslogIdentifier=nebula diff --git a/examples/service_scripts/nebula.service b/examples/service_scripts/nebula.service index 19f3900..fd7a067 100644 --- a/examples/service_scripts/nebula.service +++ b/examples/service_scripts/nebula.service @@ -1,7 +1,7 @@ [Unit] -Description=nebula -Wants=basic.target -After=basic.target network.target +Description=Nebula overlay networking tool +Wants=basic.target network-online.target nss-lookup.target time-sync.target +After=basic.target network.target network-online.target Before=sshd.service [Service] From b5a85a6eb8065bf68c03b0f288c9cacd3d2f8505 Mon Sep 17 00:00:00 2001 From: John Maguire Date: Tue, 20 Dec 2022 16:50:02 -0500 Subject: [PATCH 14/26] Update example config with IPv6 note for allow lists (#742) --- examples/config.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/config.yml b/examples/config.yml index 73c07bf..f214bf7 100644 --- a/examples/config.yml +++ b/examples/config.yml @@ -47,8 +47,9 @@ lighthouse: # allowed. You can provide CIDRs here with `true` to allow and `false` to # deny. The most specific CIDR rule applies to each remote. If all rules are # "allow", the default will be "deny", and vice-versa. If both "allow" and - # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the - # default. + # "deny" IPv4 rules are present, then you MUST set a rule for "0.0.0.0/0" as + # the default. Similarly if both "allow" and "deny" IPv6 rules are present, + # then you MUST set a rule for "::/0" as the default. #remote_allow_list: # Example to block IPs from this subnet from being used for remote IPs. 
#"172.16.0.0/12": false From ff54bfd9f3285a3601c70b6b3c9436e7e275b66e Mon Sep 17 00:00:00 2001 From: John Maguire Date: Tue, 20 Dec 2022 16:52:51 -0500 Subject: [PATCH 15/26] Add nebula-cert.exe and cert files to .gitignore (#722) --- .gitignore | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 5560418..0efb967 100644 --- a/.gitignore +++ b/.gitignore @@ -4,10 +4,14 @@ /nebula-arm6 /nebula-darwin /nebula.exe -/cert/*.crt -/cert/*.key +/nebula-cert.exe /coverage.out /cpu.pprof /build /*.tar.gz /e2e/mermaid/ +**.crt +**.key +**.pem +!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key +!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt From b7e73da943fee3d53adbfa728a2eef997e3ff8e0 Mon Sep 17 00:00:00 2001 From: John Maguire Date: Tue, 20 Dec 2022 16:53:56 -0500 Subject: [PATCH 16/26] Add note indicating modes have usage text (#794) --- cmd/nebula-cert/main.go | 2 ++ cmd/nebula-cert/main_test.go | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/nebula-cert/main.go b/cmd/nebula-cert/main.go index f814fc3..3fba40a 100644 --- a/cmd/nebula-cert/main.go +++ b/cmd/nebula-cert/main.go @@ -127,6 +127,8 @@ func help(err string, out io.Writer) { fmt.Fprintln(out, " "+signSummary()) fmt.Fprintln(out, " "+printSummary()) fmt.Fprintln(out, " "+verifySummary()) + fmt.Fprintln(out, "") + fmt.Fprintf(out, " To see usage for a given mode, use %s -h\n", os.Args[0]) } func mustFlagString(name string, val *string) error { diff --git a/cmd/nebula-cert/main_test.go b/cmd/nebula-cert/main_test.go index 07c7c07..3d0fa1b 100644 --- a/cmd/nebula-cert/main_test.go +++ b/cmd/nebula-cert/main_test.go @@ -22,7 +22,9 @@ func Test_help(t *testing.T) { " " + keygenSummary() + "\n" + " " + signSummary() + "\n" + " " + printSummary() + "\n" + - " " + verifySummary() + "\n" + " " + verifySummary() + "\n" + + "\n" + + " To see usage for a given mode, use " + os.Args[0] + " -h\n" ob := &bytes.Buffer{} From c44da3abeec3dede8460bdb51a06bc64b844a3ca Mon Sep 17 00:00:00 2001 From: John Maguire Date: Tue, 20 Dec 2022 16:59:11 -0500 Subject: [PATCH 17/26] Make DNS queries case insensitive (#793) --- dns_server.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dns_server.go b/dns_server.go index 60cceae..19bc5ce 100644 --- a/dns_server.go +++ b/dns_server.go @@ -4,6 +4,7 @@ import ( "fmt" "net" "strconv" + "strings" "sync" "github.com/miekg/dns" @@ -33,11 +34,10 @@ func newDnsRecords(hostMap *HostMap) *dnsRecords { func (d *dnsRecords) Query(data string) string { d.RLock() - if r, ok := d.dnsMap[data]; ok { - d.RUnlock() + defer d.RUnlock() + if r, ok := d.dnsMap[strings.ToLower(data)]; ok { return r } - d.RUnlock() return "" } @@ -62,8 +62,8 @@ func (d *dnsRecords) QueryCert(data string) string { func (d *dnsRecords) Add(host, data string) { d.Lock() - d.dnsMap[host] = data - d.Unlock() + defer d.Unlock() + d.dnsMap[strings.ToLower(host)] = data } func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) { From c177126ed0204aa101c44be4ef22148ebd2c781b Mon Sep 17 00:00:00 2001 From: Nate Brown Date: Wed, 11 Jan 2023 19:35:19 -0600 Subject: [PATCH 18/26] Fix possible panic in the timerwheels (#802) --- firewall_test.go | 12 ++++++------ timeout.go | 13 +++++++------ timeout_system.go | 13 +++++++------ timeout_system_test.go | 37 +++++++++++++++++++++++++++++-------- timeout_test.go | 37 +++++++++++++++++++++++++++++-------- 5 files changed, 78 insertions(+), 34 
deletions(-) diff --git a/firewall_test.go b/firewall_test.go index ce6ba18..4f24ac0 100644 --- a/firewall_test.go +++ b/firewall_test.go @@ -34,27 +34,27 @@ func TestNewFirewall(t *testing.T) { assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) - assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) + assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) - assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) + assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) - assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) + assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) - assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) + assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) - assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) + assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) - assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) + assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) } func TestFirewall_AddRule(t *testing.T) { diff --git a/timeout.go b/timeout.go index fe63f3e..6d8f68b 100644 --- a/timeout.go +++ b/timeout.go @@ -36,19 +36,19 @@ type TimerWheel struct { itemsCached int } -// Represents a tick in the wheel +// TimeoutList Represents a tick in the wheel type TimeoutList struct { Head *TimeoutItem Tail *TimeoutItem } -// Represents an item within a tick +// TimeoutItem Represents an item within a tick type TimeoutItem struct { Packet firewall.Packet Next *TimeoutItem } -// Builds a timer wheel and identifies the tick duration and wheel duration from the provided values +// NewTimerWheel Builds a timer wheel and identifies the tick duration and wheel duration from the provided values // Purge must be called once per entry to actually remove anything func NewTimerWheel(min, max time.Duration) *TimerWheel { //TODO provide an error @@ -56,9 +56,10 @@ func NewTimerWheel(min, max time.Duration) *TimerWheel { // return nil //} - // Round down and add 1 so we can have the smallest # of ticks in the wheel and still account for a full - // max duration - wLen := int((max / min) + 1) + // Round down and add 2 so we can have the smallest # of ticks in the wheel and still account for a full + // max duration, even if our current tick is at the maximum position and the next item to be added is at maximum + // timeout + wLen := int((max / min) + 2) tw := TimerWheel{ wheelLen: wLen, diff --git a/timeout_system.go b/timeout_system.go index 72f6af9..c39d9cd 100644 --- a/timeout_system.go +++ b/timeout_system.go @@ -37,19 +37,19 @@ type SystemTimerWheel struct { lock sync.Mutex } -// Represents a tick in the wheel +// SystemTimeoutList Represents a tick in the wheel type SystemTimeoutList struct { Head *SystemTimeoutItem Tail *SystemTimeoutItem } -// Represents an item within a tick +// SystemTimeoutItem Represents an item within a tick type SystemTimeoutItem struct { Item iputil.VpnIp Next *SystemTimeoutItem 
} -// Builds a timer wheel and identifies the tick duration and wheel duration from the provided values +// NewSystemTimerWheel Builds a timer wheel and identifies the tick duration and wheel duration from the provided values // Purge must be called once per entry to actually remove anything func NewSystemTimerWheel(min, max time.Duration) *SystemTimerWheel { //TODO provide an error @@ -57,9 +57,10 @@ func NewSystemTimerWheel(min, max time.Duration) *SystemTimerWheel { // return nil //} - // Round down and add 1 so we can have the smallest # of ticks in the wheel and still account for a full - // max duration - wLen := int((max / min) + 1) + // Round down and add 2 so we can have the smallest # of ticks in the wheel and still account for a full + // max duration, even if our current tick is at the maximum position and the next item to be added is at maximum + // timeout + wLen := int((max / min) + 2) tw := SystemTimerWheel{ wheelLen: wLen, diff --git a/timeout_system_test.go b/timeout_system_test.go index 41c64a0..ba3c22b 100644 --- a/timeout_system_test.go +++ b/timeout_system_test.go @@ -12,24 +12,24 @@ import ( func TestNewSystemTimerWheel(t *testing.T) { // Make sure we get an object we expect tw := NewSystemTimerWheel(time.Second, time.Second*10) - assert.Equal(t, 11, tw.wheelLen) + assert.Equal(t, 12, tw.wheelLen) assert.Equal(t, 0, tw.current) assert.Nil(t, tw.lastTick) assert.Equal(t, time.Second*1, tw.tickDuration) assert.Equal(t, time.Second*10, tw.wheelDuration) - assert.Len(t, tw.wheel, 11) + assert.Len(t, tw.wheel, 12) // Assert the math is correct tw = NewSystemTimerWheel(time.Second*3, time.Second*10) - assert.Equal(t, 4, tw.wheelLen) + assert.Equal(t, 5, tw.wheelLen) tw = NewSystemTimerWheel(time.Second*120, time.Minute*10) - assert.Equal(t, 6, tw.wheelLen) + assert.Equal(t, 7, tw.wheelLen) } func TestSystemTimerWheel_findWheel(t *testing.T) { tw := NewSystemTimerWheel(time.Second, time.Second*10) - assert.Len(t, tw.wheel, 11) + assert.Len(t, tw.wheel, 12) // Current + tick + 1 since we don't know how far into current we are assert.Equal(t, 2, tw.findWheel(time.Second*1)) @@ -38,15 +38,32 @@ func TestSystemTimerWheel_findWheel(t *testing.T) { assert.Equal(t, 2, tw.findWheel(time.Millisecond*1)) // Make sure we hit that last index - assert.Equal(t, 0, tw.findWheel(time.Second*10)) + assert.Equal(t, 11, tw.findWheel(time.Second*10)) // Scale down to max duration - assert.Equal(t, 0, tw.findWheel(time.Second*11)) + assert.Equal(t, 11, tw.findWheel(time.Second*11)) tw.current = 1 // Make sure we account for the current position properly assert.Equal(t, 3, tw.findWheel(time.Second*1)) - assert.Equal(t, 1, tw.findWheel(time.Second*10)) + assert.Equal(t, 0, tw.findWheel(time.Second*10)) + + // Ensure that all configurations of a wheel does not result in calculating an overflow of the wheel + for min := time.Duration(1); min < 100; min++ { + for max := min; max < 100; max++ { + tw = NewSystemTimerWheel(min, max) + + for current := 0; current < tw.wheelLen; current++ { + tw.current = current + for timeout := time.Duration(0); timeout <= tw.wheelDuration; timeout++ { + tick := tw.findWheel(timeout) + if tick >= tw.wheelLen { + t.Errorf("Min: %v; Max: %v; Wheel len: %v; Current Tick: %v; Insert timeout: %v; Calc tick: %v", min, max, tw.wheelLen, current, timeout, tick) + } + } + } + } + } } func TestSystemTimerWheel_Add(t *testing.T) { @@ -129,6 +146,10 @@ func TestSystemTimerWheel_Purge(t *testing.T) { tw.advance(ta) assert.Equal(t, 10, tw.current) + ta = ta.Add(time.Second * 1) + 
tw.advance(ta) + assert.Equal(t, 11, tw.current) + ta = ta.Add(time.Second * 1) tw.advance(ta) assert.Equal(t, 0, tw.current) diff --git a/timeout_test.go b/timeout_test.go index 9678b35..70b107c 100644 --- a/timeout_test.go +++ b/timeout_test.go @@ -11,24 +11,24 @@ import ( func TestNewTimerWheel(t *testing.T) { // Make sure we get an object we expect tw := NewTimerWheel(time.Second, time.Second*10) - assert.Equal(t, 11, tw.wheelLen) + assert.Equal(t, 12, tw.wheelLen) assert.Equal(t, 0, tw.current) assert.Nil(t, tw.lastTick) assert.Equal(t, time.Second*1, tw.tickDuration) assert.Equal(t, time.Second*10, tw.wheelDuration) - assert.Len(t, tw.wheel, 11) + assert.Len(t, tw.wheel, 12) // Assert the math is correct tw = NewTimerWheel(time.Second*3, time.Second*10) - assert.Equal(t, 4, tw.wheelLen) + assert.Equal(t, 5, tw.wheelLen) tw = NewTimerWheel(time.Second*120, time.Minute*10) - assert.Equal(t, 6, tw.wheelLen) + assert.Equal(t, 7, tw.wheelLen) } func TestTimerWheel_findWheel(t *testing.T) { tw := NewTimerWheel(time.Second, time.Second*10) - assert.Len(t, tw.wheel, 11) + assert.Len(t, tw.wheel, 12) // Current + tick + 1 since we don't know how far into current we are assert.Equal(t, 2, tw.findWheel(time.Second*1)) @@ -37,15 +37,15 @@ func TestTimerWheel_findWheel(t *testing.T) { assert.Equal(t, 2, tw.findWheel(time.Millisecond*1)) // Make sure we hit that last index - assert.Equal(t, 0, tw.findWheel(time.Second*10)) + assert.Equal(t, 11, tw.findWheel(time.Second*10)) // Scale down to max duration - assert.Equal(t, 0, tw.findWheel(time.Second*11)) + assert.Equal(t, 11, tw.findWheel(time.Second*11)) tw.current = 1 // Make sure we account for the current position properly assert.Equal(t, 3, tw.findWheel(time.Second*1)) - assert.Equal(t, 1, tw.findWheel(time.Second*10)) + assert.Equal(t, 0, tw.findWheel(time.Second*10)) } func TestTimerWheel_Add(t *testing.T) { @@ -75,6 +75,23 @@ func TestTimerWheel_Add(t *testing.T) { tw.Add(fp2, time.Second*1) assert.Nil(t, tw.itemCache) assert.Equal(t, 0, tw.itemsCached) + + // Ensure that all configurations of a wheel does not result in calculating an overflow of the wheel + for min := time.Duration(1); min < 100; min++ { + for max := min; max < 100; max++ { + tw = NewTimerWheel(min, max) + + for current := 0; current < tw.wheelLen; current++ { + tw.current = current + for timeout := time.Duration(0); timeout <= tw.wheelDuration; timeout++ { + tick := tw.findWheel(timeout) + if tick >= tw.wheelLen { + t.Errorf("Min: %v; Max: %v; Wheel len: %v; Current Tick: %v; Insert timeout: %v; Calc tick: %v", min, max, tw.wheelLen, current, timeout, tick) + } + } + } + } + } } func TestTimerWheel_Purge(t *testing.T) { @@ -134,6 +151,10 @@ func TestTimerWheel_Purge(t *testing.T) { tw.advance(ta) assert.Equal(t, 10, tw.current) + ta = ta.Add(time.Second * 1) + tw.advance(ta) + assert.Equal(t, 11, tw.current) + ta = ta.Add(time.Second * 1) tw.advance(ta) assert.Equal(t, 0, tw.current) From 5278b6f926c55d5c0c9e864a40f634952cb1909c Mon Sep 17 00:00:00 2001 From: Nate Brown Date: Wed, 18 Jan 2023 10:56:42 -0600 Subject: [PATCH 19/26] Generic timerwheel (#804) --- connection_manager.go | 24 ++--- firewall.go | 6 +- handshake_manager.go | 11 +-- handshake_manager_test.go | 4 +- timeout.go | 95 ++++++++++++------ timeout_system.go | 199 -------------------------------------- timeout_system_test.go | 156 ------------------------------ timeout_test.go | 52 ++++++---- 8 files changed, 116 insertions(+), 431 deletions(-) delete mode 100644 timeout_system.go delete mode 100644 
timeout_system_test.go diff --git a/connection_manager.go b/connection_manager.go index a80a6c3..82167ea 100644 --- a/connection_manager.go +++ b/connection_manager.go @@ -19,12 +19,12 @@ type connectionManager struct { inLock *sync.RWMutex out map[iputil.VpnIp]struct{} outLock *sync.RWMutex - TrafficTimer *SystemTimerWheel + TrafficTimer *LockingTimerWheel[iputil.VpnIp] intf *Interface pendingDeletion map[iputil.VpnIp]int pendingDeletionLock *sync.RWMutex - pendingDeletionTimer *SystemTimerWheel + pendingDeletionTimer *LockingTimerWheel[iputil.VpnIp] checkInterval int pendingDeletionInterval int @@ -40,11 +40,11 @@ func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface inLock: &sync.RWMutex{}, out: make(map[iputil.VpnIp]struct{}), outLock: &sync.RWMutex{}, - TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), + TrafficTimer: NewLockingTimerWheel[iputil.VpnIp](time.Millisecond*500, time.Second*60), intf: intf, pendingDeletion: make(map[iputil.VpnIp]int), pendingDeletionLock: &sync.RWMutex{}, - pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), + pendingDeletionTimer: NewLockingTimerWheel[iputil.VpnIp](time.Millisecond*500, time.Second*60), checkInterval: checkInterval, pendingDeletionInterval: pendingDeletionInterval, l: l, @@ -160,15 +160,13 @@ func (n *connectionManager) Run(ctx context.Context) { } func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) { - n.TrafficTimer.advance(now) + n.TrafficTimer.Advance(now) for { - ep := n.TrafficTimer.Purge() - if ep == nil { + vpnIp, has := n.TrafficTimer.Purge() + if !has { break } - vpnIp := ep.(iputil.VpnIp) - // Check for traffic coming back in from this host. traf := n.CheckIn(vpnIp) @@ -214,15 +212,13 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) } func (n *connectionManager) HandleDeletionTick(now time.Time) { - n.pendingDeletionTimer.advance(now) + n.pendingDeletionTimer.Advance(now) for { - ep := n.pendingDeletionTimer.Purge() - if ep == nil { + vpnIp, has := n.pendingDeletionTimer.Purge() + if !has { break } - vpnIp := ep.(iputil.VpnIp) - hostinfo, err := n.hostMap.QueryVpnIp(vpnIp) if err != nil { n.l.Debugf("Not found in hostmap: %s", vpnIp) diff --git a/firewall.go b/firewall.go index 99b18f8..9fd75fc 100644 --- a/firewall.go +++ b/firewall.go @@ -77,7 +77,7 @@ type FirewallConntrack struct { sync.Mutex Conns map[firewall.Packet]*conn - TimerWheel *TimerWheel + TimerWheel *TimerWheel[firewall.Packet] } type FirewallTable struct { @@ -145,7 +145,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D return &Firewall{ Conntrack: &FirewallConntrack{ Conns: make(map[firewall.Packet]*conn), - TimerWheel: NewTimerWheel(min, max), + TimerWheel: NewTimerWheel[firewall.Packet](min, max), }, InRules: newFirewallTable(), OutRules: newFirewallTable(), @@ -510,6 +510,7 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) { conntrack := f.Conntrack conntrack.Lock() if _, ok := conntrack.Conns[fp]; !ok { + conntrack.TimerWheel.Advance(time.Now()) conntrack.TimerWheel.Add(fp, timeout) } @@ -537,6 +538,7 @@ func (f *Firewall) evict(p firewall.Packet) { // Timeout is in the future, re-add the timer if newT > 0 { + conntrack.TimerWheel.Advance(time.Now()) conntrack.TimerWheel.Add(p, newT) return } diff --git a/handshake_manager.go b/handshake_manager.go index 4cb9c39..4325841 100644 --- a/handshake_manager.go +++ b/handshake_manager.go @@ -47,7 +47,7 @@ type 
HandshakeManager struct { lightHouse *LightHouse outside *udp.Conn config HandshakeConfig - OutboundHandshakeTimer *SystemTimerWheel + OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp] messageMetrics *MessageMetrics metricInitiated metrics.Counter metricTimedOut metrics.Counter @@ -65,7 +65,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [ outside: outside, config: config, trigger: make(chan iputil.VpnIp, config.triggerBuffer), - OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)), + OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)), messageMetrics: config.messageMetrics, metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil), metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil), @@ -90,13 +90,12 @@ func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) { } func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) { - c.OutboundHandshakeTimer.advance(now) + c.OutboundHandshakeTimer.Advance(now) for { - ep := c.OutboundHandshakeTimer.Purge() - if ep == nil { + vpnIp, has := c.OutboundHandshakeTimer.Purge() + if !has { break } - vpnIp := ep.(iputil.VpnIp) c.handleOutbound(vpnIp, f, false) } } diff --git a/handshake_manager_test.go b/handshake_manager_test.go index 5381b23..413a50a 100644 --- a/handshake_manager_test.go +++ b/handshake_manager_test.go @@ -106,8 +106,8 @@ func Test_NewHandshakeManagerTrigger(t *testing.T) { assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer)) } -func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) { - for _, i := range tw.wheel { +func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) { + for _, i := range tw.t.wheel { n := i.Head for n != nil { c++ diff --git a/timeout.go b/timeout.go index 6d8f68b..c1b4c39 100644 --- a/timeout.go +++ b/timeout.go @@ -1,17 +1,14 @@ package nebula import ( + "sync" "time" - - "github.com/slackhq/nebula/firewall" ) // How many timer objects should be cached const timerCacheMax = 50000 -var emptyFWPacket = firewall.Packet{} - -type TimerWheel struct { +type TimerWheel[T any] struct { // Current tick current int @@ -26,31 +23,38 @@ type TimerWheel struct { wheelDuration time.Duration // The actual wheel which is just a set of singly linked lists, head/tail pointers - wheel []*TimeoutList + wheel []*TimeoutList[T] // Singly linked list of items that have timed out of the wheel - expired *TimeoutList + expired *TimeoutList[T] // Item cache to avoid garbage collect - itemCache *TimeoutItem + itemCache *TimeoutItem[T] itemsCached int } +type LockingTimerWheel[T any] struct { + m sync.Mutex + t *TimerWheel[T] +} + // TimeoutList Represents a tick in the wheel -type TimeoutList struct { - Head *TimeoutItem - Tail *TimeoutItem +type TimeoutList[T any] struct { + Head *TimeoutItem[T] + Tail *TimeoutItem[T] } // TimeoutItem Represents an item within a tick -type TimeoutItem struct { - Packet firewall.Packet - Next *TimeoutItem +type TimeoutItem[T any] struct { + Item T + Next *TimeoutItem[T] } // NewTimerWheel Builds a timer wheel and identifies the tick duration and wheel duration from the provided values // Purge must be called once per entry to actually remove anything -func NewTimerWheel(min, max time.Duration) *TimerWheel { +// The TimerWheel does not handle concurrency on its own. 
+// Locks around access to it must be used if multiple routines are manipulating it. +func NewTimerWheel[T any](min, max time.Duration) *TimerWheel[T] { //TODO provide an error //if min >= max { // return nil @@ -61,26 +65,31 @@ func NewTimerWheel(min, max time.Duration) *TimerWheel { // timeout wLen := int((max / min) + 2) - tw := TimerWheel{ + tw := TimerWheel[T]{ wheelLen: wLen, - wheel: make([]*TimeoutList, wLen), + wheel: make([]*TimeoutList[T], wLen), tickDuration: min, wheelDuration: max, - expired: &TimeoutList{}, + expired: &TimeoutList[T]{}, } for i := range tw.wheel { - tw.wheel[i] = &TimeoutList{} + tw.wheel[i] = &TimeoutList[T]{} } return &tw } -// Add will add a firewall.Packet to the wheel in it's proper timeout -func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem { - // Check and see if we should progress the tick - tw.advance(time.Now()) +// NewLockingTimerWheel is version of TimerWheel that is safe for concurrent use with a small performance penalty +func NewLockingTimerWheel[T any](min, max time.Duration) *LockingTimerWheel[T] { + return &LockingTimerWheel[T]{ + t: NewTimerWheel[T](min, max), + } +} +// Add will add an item to the wheel in its proper timeout. +// Caller should Advance the wheel prior to ensure the proper slot is used. +func (tw *TimerWheel[T]) Add(v T, timeout time.Duration) *TimeoutItem[T] { i := tw.findWheel(timeout) // Try to fetch off the cache @@ -90,11 +99,11 @@ func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem tw.itemsCached-- ti.Next = nil } else { - ti = &TimeoutItem{} + ti = &TimeoutItem[T]{} } // Relink and return - ti.Packet = v + ti.Item = v if tw.wheel[i].Tail == nil { tw.wheel[i].Head = ti tw.wheel[i].Tail = ti @@ -106,9 +115,12 @@ func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem return ti } -func (tw *TimerWheel) Purge() (firewall.Packet, bool) { +// Purge removes and returns the first available expired item from the wheel and the 2nd argument is true. +// If no item is available then an empty T is returned and the 2nd argument is false. +func (tw *TimerWheel[T]) Purge() (T, bool) { if tw.expired.Head == nil { - return emptyFWPacket, false + var na T + return na, false } ti := tw.expired.Head @@ -128,11 +140,11 @@ func (tw *TimerWheel) Purge() (firewall.Packet, bool) { tw.itemsCached++ } - return ti.Packet, true + return ti.Item, true } -// advance will move the wheel forward by proper number of ticks. The caller _should_ lock the wheel before calling this -func (tw *TimerWheel) findWheel(timeout time.Duration) (i int) { +// findWheel find the next position in the wheel for the provided timeout given the current tick +func (tw *TimerWheel[T]) findWheel(timeout time.Duration) (i int) { if timeout < tw.tickDuration { // Can't track anything below the set resolution timeout = tw.tickDuration @@ -154,8 +166,9 @@ func (tw *TimerWheel) findWheel(timeout time.Duration) (i int) { return tick } -// advance will lock and move the wheel forward by proper number of ticks. -func (tw *TimerWheel) advance(now time.Time) { +// Advance will move the wheel forward by the appropriate number of ticks for the provided time and all items +// passed over will be moved to the expired list. Calling Purge is necessary to remove them entirely. 
+func (tw *TimerWheel[T]) Advance(now time.Time) { if tw.lastTick == nil { tw.lastTick = &now } @@ -192,3 +205,21 @@ func (tw *TimerWheel) advance(now time.Time) { newTick := tw.lastTick.Add(tw.tickDuration * time.Duration(adv)) tw.lastTick = &newTick } + +func (lw *LockingTimerWheel[T]) Add(v T, timeout time.Duration) *TimeoutItem[T] { + lw.m.Lock() + defer lw.m.Unlock() + return lw.t.Add(v, timeout) +} + +func (lw *LockingTimerWheel[T]) Purge() (T, bool) { + lw.m.Lock() + defer lw.m.Unlock() + return lw.t.Purge() +} + +func (lw *LockingTimerWheel[T]) Advance(now time.Time) { + lw.m.Lock() + defer lw.m.Unlock() + lw.t.Advance(now) +} diff --git a/timeout_system.go b/timeout_system.go deleted file mode 100644 index c39d9cd..0000000 --- a/timeout_system.go +++ /dev/null @@ -1,199 +0,0 @@ -package nebula - -import ( - "sync" - "time" - - "github.com/slackhq/nebula/iputil" -) - -// How many timer objects should be cached -const systemTimerCacheMax = 50000 - -type SystemTimerWheel struct { - // Current tick - current int - - // Cheat on finding the length of the wheel - wheelLen int - - // Last time we ticked, since we are lazy ticking - lastTick *time.Time - - // Durations of a tick and the entire wheel - tickDuration time.Duration - wheelDuration time.Duration - - // The actual wheel which is just a set of singly linked lists, head/tail pointers - wheel []*SystemTimeoutList - - // Singly linked list of items that have timed out of the wheel - expired *SystemTimeoutList - - // Item cache to avoid garbage collect - itemCache *SystemTimeoutItem - itemsCached int - - lock sync.Mutex -} - -// SystemTimeoutList Represents a tick in the wheel -type SystemTimeoutList struct { - Head *SystemTimeoutItem - Tail *SystemTimeoutItem -} - -// SystemTimeoutItem Represents an item within a tick -type SystemTimeoutItem struct { - Item iputil.VpnIp - Next *SystemTimeoutItem -} - -// NewSystemTimerWheel Builds a timer wheel and identifies the tick duration and wheel duration from the provided values -// Purge must be called once per entry to actually remove anything -func NewSystemTimerWheel(min, max time.Duration) *SystemTimerWheel { - //TODO provide an error - //if min >= max { - // return nil - //} - - // Round down and add 2 so we can have the smallest # of ticks in the wheel and still account for a full - // max duration, even if our current tick is at the maximum position and the next item to be added is at maximum - // timeout - wLen := int((max / min) + 2) - - tw := SystemTimerWheel{ - wheelLen: wLen, - wheel: make([]*SystemTimeoutList, wLen), - tickDuration: min, - wheelDuration: max, - expired: &SystemTimeoutList{}, - } - - for i := range tw.wheel { - tw.wheel[i] = &SystemTimeoutList{} - } - - return &tw -} - -func (tw *SystemTimerWheel) Add(v iputil.VpnIp, timeout time.Duration) *SystemTimeoutItem { - tw.lock.Lock() - defer tw.lock.Unlock() - - // Check and see if we should progress the tick - //tw.advance(time.Now()) - - i := tw.findWheel(timeout) - - // Try to fetch off the cache - ti := tw.itemCache - if ti != nil { - tw.itemCache = ti.Next - ti.Next = nil - tw.itemsCached-- - } else { - ti = &SystemTimeoutItem{} - } - - // Relink and return - ti.Item = v - ti.Next = tw.wheel[i].Head - tw.wheel[i].Head = ti - - if tw.wheel[i].Tail == nil { - tw.wheel[i].Tail = ti - } - - return ti -} - -func (tw *SystemTimerWheel) Purge() interface{} { - tw.lock.Lock() - defer tw.lock.Unlock() - - if tw.expired.Head == nil { - return nil - } - - ti := tw.expired.Head - tw.expired.Head = ti.Next - - if 
tw.expired.Head == nil { - tw.expired.Tail = nil - } - - p := ti.Item - - // Clear out the items references - ti.Item = 0 - ti.Next = nil - - // Maybe cache it for later - if tw.itemsCached < systemTimerCacheMax { - ti.Next = tw.itemCache - tw.itemCache = ti - tw.itemsCached++ - } - - return p -} - -func (tw *SystemTimerWheel) findWheel(timeout time.Duration) (i int) { - if timeout < tw.tickDuration { - // Can't track anything below the set resolution - timeout = tw.tickDuration - } else if timeout > tw.wheelDuration { - // We aren't handling timeouts greater than the wheels duration - timeout = tw.wheelDuration - } - - // Find the next highest, rounding up - tick := int(((timeout - 1) / tw.tickDuration) + 1) - - // Add another tick since the current tick may almost be over then map it to the wheel from our - // current position - tick += tw.current + 1 - if tick >= tw.wheelLen { - tick -= tw.wheelLen - } - - return tick -} - -func (tw *SystemTimerWheel) advance(now time.Time) { - tw.lock.Lock() - defer tw.lock.Unlock() - - if tw.lastTick == nil { - tw.lastTick = &now - } - - // We want to round down - ticks := int(now.Sub(*tw.lastTick) / tw.tickDuration) - //l.Infoln("Ticks: ", ticks) - for i := 0; i < ticks; i++ { - tw.current++ - //l.Infoln("Tick: ", tw.current) - if tw.current >= tw.wheelLen { - tw.current = 0 - } - - // We need to append the expired items as to not starve evicting the oldest ones - if tw.expired.Tail == nil { - tw.expired.Head = tw.wheel[tw.current].Head - tw.expired.Tail = tw.wheel[tw.current].Tail - } else { - tw.expired.Tail.Next = tw.wheel[tw.current].Head - if tw.wheel[tw.current].Tail != nil { - tw.expired.Tail = tw.wheel[tw.current].Tail - } - } - - //l.Infoln("Head: ", tw.expired.Head, "Tail: ", tw.expired.Tail) - tw.wheel[tw.current].Head = nil - tw.wheel[tw.current].Tail = nil - - tw.lastTick = &now - } -} diff --git a/timeout_system_test.go b/timeout_system_test.go deleted file mode 100644 index ba3c22b..0000000 --- a/timeout_system_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package nebula - -import ( - "net" - "testing" - "time" - - "github.com/slackhq/nebula/iputil" - "github.com/stretchr/testify/assert" -) - -func TestNewSystemTimerWheel(t *testing.T) { - // Make sure we get an object we expect - tw := NewSystemTimerWheel(time.Second, time.Second*10) - assert.Equal(t, 12, tw.wheelLen) - assert.Equal(t, 0, tw.current) - assert.Nil(t, tw.lastTick) - assert.Equal(t, time.Second*1, tw.tickDuration) - assert.Equal(t, time.Second*10, tw.wheelDuration) - assert.Len(t, tw.wheel, 12) - - // Assert the math is correct - tw = NewSystemTimerWheel(time.Second*3, time.Second*10) - assert.Equal(t, 5, tw.wheelLen) - - tw = NewSystemTimerWheel(time.Second*120, time.Minute*10) - assert.Equal(t, 7, tw.wheelLen) -} - -func TestSystemTimerWheel_findWheel(t *testing.T) { - tw := NewSystemTimerWheel(time.Second, time.Second*10) - assert.Len(t, tw.wheel, 12) - - // Current + tick + 1 since we don't know how far into current we are - assert.Equal(t, 2, tw.findWheel(time.Second*1)) - - // Scale up to min duration - assert.Equal(t, 2, tw.findWheel(time.Millisecond*1)) - - // Make sure we hit that last index - assert.Equal(t, 11, tw.findWheel(time.Second*10)) - - // Scale down to max duration - assert.Equal(t, 11, tw.findWheel(time.Second*11)) - - tw.current = 1 - // Make sure we account for the current position properly - assert.Equal(t, 3, tw.findWheel(time.Second*1)) - assert.Equal(t, 0, tw.findWheel(time.Second*10)) - - // Ensure that all configurations of a wheel does not result 
in calculating an overflow of the wheel - for min := time.Duration(1); min < 100; min++ { - for max := min; max < 100; max++ { - tw = NewSystemTimerWheel(min, max) - - for current := 0; current < tw.wheelLen; current++ { - tw.current = current - for timeout := time.Duration(0); timeout <= tw.wheelDuration; timeout++ { - tick := tw.findWheel(timeout) - if tick >= tw.wheelLen { - t.Errorf("Min: %v; Max: %v; Wheel len: %v; Current Tick: %v; Insert timeout: %v; Calc tick: %v", min, max, tw.wheelLen, current, timeout, tick) - } - } - } - } - } -} - -func TestSystemTimerWheel_Add(t *testing.T) { - tw := NewSystemTimerWheel(time.Second, time.Second*10) - - fp1 := iputil.Ip2VpnIp(net.ParseIP("1.2.3.4")) - tw.Add(fp1, time.Second*1) - - // Make sure we set head and tail properly - assert.NotNil(t, tw.wheel[2]) - assert.Equal(t, fp1, tw.wheel[2].Head.Item) - assert.Nil(t, tw.wheel[2].Head.Next) - assert.Equal(t, fp1, tw.wheel[2].Tail.Item) - assert.Nil(t, tw.wheel[2].Tail.Next) - - // Make sure we only modify head - fp2 := iputil.Ip2VpnIp(net.ParseIP("1.2.3.4")) - tw.Add(fp2, time.Second*1) - assert.Equal(t, fp2, tw.wheel[2].Head.Item) - assert.Equal(t, fp1, tw.wheel[2].Head.Next.Item) - assert.Equal(t, fp1, tw.wheel[2].Tail.Item) - assert.Nil(t, tw.wheel[2].Tail.Next) - - // Make sure we use free'd items first - tw.itemCache = &SystemTimeoutItem{} - tw.itemsCached = 1 - tw.Add(fp2, time.Second*1) - assert.Nil(t, tw.itemCache) - assert.Equal(t, 0, tw.itemsCached) -} - -func TestSystemTimerWheel_Purge(t *testing.T) { - // First advance should set the lastTick and do nothing else - tw := NewSystemTimerWheel(time.Second, time.Second*10) - assert.Nil(t, tw.lastTick) - tw.advance(time.Now()) - assert.NotNil(t, tw.lastTick) - assert.Equal(t, 0, tw.current) - - fps := []iputil.VpnIp{9, 10, 11, 12} - - //fp1 := ip2int(net.ParseIP("1.2.3.4")) - - tw.Add(fps[0], time.Second*1) - tw.Add(fps[1], time.Second*1) - tw.Add(fps[2], time.Second*2) - tw.Add(fps[3], time.Second*2) - - ta := time.Now().Add(time.Second * 3) - lastTick := *tw.lastTick - tw.advance(ta) - assert.Equal(t, 3, tw.current) - assert.True(t, tw.lastTick.After(lastTick)) - - // Make sure we get all 4 packets back - for i := 0; i < 4; i++ { - assert.Contains(t, fps, tw.Purge()) - } - - // Make sure there aren't any leftover - assert.Nil(t, tw.Purge()) - assert.Nil(t, tw.expired.Head) - assert.Nil(t, tw.expired.Tail) - - // Make sure we cached the free'd items - assert.Equal(t, 4, tw.itemsCached) - ci := tw.itemCache - for i := 0; i < 4; i++ { - assert.NotNil(t, ci) - ci = ci.Next - } - assert.Nil(t, ci) - - // Lets make sure we roll over properly - ta = ta.Add(time.Second * 5) - tw.advance(ta) - assert.Equal(t, 8, tw.current) - - ta = ta.Add(time.Second * 2) - tw.advance(ta) - assert.Equal(t, 10, tw.current) - - ta = ta.Add(time.Second * 1) - tw.advance(ta) - assert.Equal(t, 11, tw.current) - - ta = ta.Add(time.Second * 1) - tw.advance(ta) - assert.Equal(t, 0, tw.current) -} diff --git a/timeout_test.go b/timeout_test.go index 70b107c..3f81ff4 100644 --- a/timeout_test.go +++ b/timeout_test.go @@ -10,7 +10,7 @@ import ( func TestNewTimerWheel(t *testing.T) { // Make sure we get an object we expect - tw := NewTimerWheel(time.Second, time.Second*10) + tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10) assert.Equal(t, 12, tw.wheelLen) assert.Equal(t, 0, tw.current) assert.Nil(t, tw.lastTick) @@ -19,15 +19,27 @@ func TestNewTimerWheel(t *testing.T) { assert.Len(t, tw.wheel, 12) // Assert the math is correct - tw = 
NewTimerWheel(time.Second*3, time.Second*10) + tw = NewTimerWheel[firewall.Packet](time.Second*3, time.Second*10) assert.Equal(t, 5, tw.wheelLen) - tw = NewTimerWheel(time.Second*120, time.Minute*10) + tw = NewTimerWheel[firewall.Packet](time.Second*120, time.Minute*10) assert.Equal(t, 7, tw.wheelLen) + + // Test empty purge of non nil items + i, ok := tw.Purge() + assert.Equal(t, firewall.Packet{}, i) + assert.False(t, ok) + + // Test empty purges of nil items + tw2 := NewTimerWheel[*int](time.Second, time.Second*10) + i2, ok := tw2.Purge() + assert.Nil(t, i2) + assert.False(t, ok) + } func TestTimerWheel_findWheel(t *testing.T) { - tw := NewTimerWheel(time.Second, time.Second*10) + tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10) assert.Len(t, tw.wheel, 12) // Current + tick + 1 since we don't know how far into current we are @@ -49,28 +61,28 @@ func TestTimerWheel_findWheel(t *testing.T) { } func TestTimerWheel_Add(t *testing.T) { - tw := NewTimerWheel(time.Second, time.Second*10) + tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10) fp1 := firewall.Packet{} tw.Add(fp1, time.Second*1) // Make sure we set head and tail properly assert.NotNil(t, tw.wheel[2]) - assert.Equal(t, fp1, tw.wheel[2].Head.Packet) + assert.Equal(t, fp1, tw.wheel[2].Head.Item) assert.Nil(t, tw.wheel[2].Head.Next) - assert.Equal(t, fp1, tw.wheel[2].Tail.Packet) + assert.Equal(t, fp1, tw.wheel[2].Tail.Item) assert.Nil(t, tw.wheel[2].Tail.Next) // Make sure we only modify head fp2 := firewall.Packet{} tw.Add(fp2, time.Second*1) - assert.Equal(t, fp2, tw.wheel[2].Head.Packet) - assert.Equal(t, fp1, tw.wheel[2].Head.Next.Packet) - assert.Equal(t, fp1, tw.wheel[2].Tail.Packet) + assert.Equal(t, fp2, tw.wheel[2].Head.Item) + assert.Equal(t, fp1, tw.wheel[2].Head.Next.Item) + assert.Equal(t, fp1, tw.wheel[2].Tail.Item) assert.Nil(t, tw.wheel[2].Tail.Next) // Make sure we use free'd items first - tw.itemCache = &TimeoutItem{} + tw.itemCache = &TimeoutItem[firewall.Packet]{} tw.itemsCached = 1 tw.Add(fp2, time.Second*1) assert.Nil(t, tw.itemCache) @@ -79,7 +91,7 @@ func TestTimerWheel_Add(t *testing.T) { // Ensure that all configurations of a wheel does not result in calculating an overflow of the wheel for min := time.Duration(1); min < 100; min++ { for max := min; max < 100; max++ { - tw = NewTimerWheel(min, max) + tw = NewTimerWheel[firewall.Packet](min, max) for current := 0; current < tw.wheelLen; current++ { tw.current = current @@ -96,9 +108,9 @@ func TestTimerWheel_Add(t *testing.T) { func TestTimerWheel_Purge(t *testing.T) { // First advance should set the lastTick and do nothing else - tw := NewTimerWheel(time.Second, time.Second*10) + tw := NewTimerWheel[firewall.Packet](time.Second, time.Second*10) assert.Nil(t, tw.lastTick) - tw.advance(time.Now()) + tw.Advance(time.Now()) assert.NotNil(t, tw.lastTick) assert.Equal(t, 0, tw.current) @@ -116,7 +128,7 @@ func TestTimerWheel_Purge(t *testing.T) { ta := time.Now().Add(time.Second * 3) lastTick := *tw.lastTick - tw.advance(ta) + tw.Advance(ta) assert.Equal(t, 3, tw.current) assert.True(t, tw.lastTick.After(lastTick)) @@ -142,20 +154,20 @@ func TestTimerWheel_Purge(t *testing.T) { } assert.Nil(t, ci) - // Lets make sure we roll over properly + // Let's make sure we roll over properly ta = ta.Add(time.Second * 5) - tw.advance(ta) + tw.Advance(ta) assert.Equal(t, 8, tw.current) ta = ta.Add(time.Second * 2) - tw.advance(ta) + tw.Advance(ta) assert.Equal(t, 10, tw.current) ta = ta.Add(time.Second * 1) - tw.advance(ta) + tw.Advance(ta) 
assert.Equal(t, 11, tw.current) ta = ta.Add(time.Second * 1) - tw.advance(ta) + tw.Advance(ta) assert.Equal(t, 0, tw.current) } From 0fc4d8192f0bf60cb0288502826633145fddadba Mon Sep 17 00:00:00 2001 From: Tricia <95389395+asymmetricia@users.noreply.github.com> Date: Mon, 23 Jan 2023 11:05:35 -0800 Subject: [PATCH 20/26] log network as String to match the other log event in interface.go that emits network (#811) Co-authored-by: Tricia Bogen --- main.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/main.go b/main.go index 71bd233..99fe72c 100644 --- a/main.go +++ b/main.go @@ -202,7 +202,10 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg hostMap := NewHostMap(l, "main", tunCidr, preferredRanges) hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false) - l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created") + l. + WithField("network", hostMap.vpnCIDR.String()). + WithField("preferredRanges", hostMap.preferredRanges). + Info("Main HostMap created") /* config.SetDefault("promoter.interval", 10) From 5bd87129461ca150850dcc3a96a5fc20c37824af Mon Sep 17 00:00:00 2001 From: John Maguire Date: Mon, 23 Jan 2023 16:51:54 -0500 Subject: [PATCH 21/26] Immediately forward packets from self to self on FreeBSD (#808) --- inside.go | 5 +++-- inside_bsd.go | 6 ++++++ inside_darwin.go | 3 --- inside_generic.go | 4 ++-- 4 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 inside_bsd.go delete mode 100644 inside_darwin.go diff --git a/inside.go b/inside.go index 177bcd3..5ed152c 100644 --- a/inside.go +++ b/inside.go @@ -25,8 +25,9 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet if fwPacket.RemoteIP == f.myVpnIp { // Immediately forward packets from self to self. - // This should only happen on Darwin-based hosts, which routes packets from - // the Nebula IP to the Nebula IP through the Nebula TUN device. + // This should only happen on Darwin-based and FreeBSD hosts, which + // routes packets from the Nebula IP to the Nebula IP through the Nebula + // TUN device. 
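[Editor note] Patch 20 above is small but worth isolating: render the network as its canonical string and build the log entry with chained WithField calls, one per line. A standalone sketch of the same shape (the network value is a placeholder):

```go
package main

import (
	"net"

	"github.com/sirupsen/logrus"
)

func main() {
	l := logrus.New()
	_, cidr, _ := net.ParseCIDR("192.168.100.0/24") // placeholder network

	// Logging cidr.String() matches the textual form the other log event in
	// interface.go emits; one WithField per line keeps the chain readable.
	l.
		WithField("network", cidr.String()).
		WithField("preferredRanges", []*net.IPNet{}).
		Info("Main HostMap created")
}
```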
if immediatelyForwardToSelf { _, err := f.readers[q].Write(packet) if err != nil { diff --git a/inside_bsd.go b/inside_bsd.go new file mode 100644 index 0000000..c9c7730 --- /dev/null +++ b/inside_bsd.go @@ -0,0 +1,6 @@ +//go:build darwin || dragonfly || freebsd || netbsd || openbsd +// +build darwin dragonfly freebsd netbsd openbsd + +package nebula + +const immediatelyForwardToSelf bool = true diff --git a/inside_darwin.go b/inside_darwin.go deleted file mode 100644 index a3b98ba..0000000 --- a/inside_darwin.go +++ /dev/null @@ -1,3 +0,0 @@ -package nebula - -const immediatelyForwardToSelf bool = true diff --git a/inside_generic.go b/inside_generic.go index 8eb98e8..0bb2345 100644 --- a/inside_generic.go +++ b/inside_generic.go @@ -1,5 +1,5 @@ -//go:build !darwin -// +build !darwin +//go:build !darwin && !dragonfly && !freebsd && !netbsd && !openbsd +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd package nebula From a06977bbd568859d30d10a58de52e79b3e1e63a8 Mon Sep 17 00:00:00 2001 From: Nate Brown Date: Mon, 13 Feb 2023 14:41:05 -0600 Subject: [PATCH 22/26] Track connections by local index id instead of vpn ip (#807) --- connection_manager.go | 139 ++++++++++++++++++------------------- connection_manager_test.go | 51 +++++++++----- hostmap.go | 5 +- inside.go | 4 +- outside.go | 10 +-- 5 files changed, 109 insertions(+), 100 deletions(-) diff --git a/connection_manager.go b/connection_manager.go index 82167ea..8135421 100644 --- a/connection_manager.go +++ b/connection_manager.go @@ -7,7 +7,6 @@ import ( "github.com/sirupsen/logrus" "github.com/slackhq/nebula/header" - "github.com/slackhq/nebula/iputil" ) // TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet @@ -15,16 +14,16 @@ import ( type connectionManager struct { hostMap *HostMap - in map[iputil.VpnIp]struct{} + in map[uint32]struct{} inLock *sync.RWMutex - out map[iputil.VpnIp]struct{} + out map[uint32]struct{} outLock *sync.RWMutex - TrafficTimer *LockingTimerWheel[iputil.VpnIp] + TrafficTimer *LockingTimerWheel[uint32] intf *Interface - pendingDeletion map[iputil.VpnIp]int + pendingDeletion map[uint32]int pendingDeletionLock *sync.RWMutex - pendingDeletionTimer *LockingTimerWheel[iputil.VpnIp] + pendingDeletionTimer *LockingTimerWheel[uint32] checkInterval int pendingDeletionInterval int @@ -36,15 +35,15 @@ type connectionManager struct { func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager { nc := &connectionManager{ hostMap: intf.hostMap, - in: make(map[iputil.VpnIp]struct{}), + in: make(map[uint32]struct{}), inLock: &sync.RWMutex{}, - out: make(map[iputil.VpnIp]struct{}), + out: make(map[uint32]struct{}), outLock: &sync.RWMutex{}, - TrafficTimer: NewLockingTimerWheel[iputil.VpnIp](time.Millisecond*500, time.Second*60), + TrafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, time.Second*60), intf: intf, - pendingDeletion: make(map[iputil.VpnIp]int), + pendingDeletion: make(map[uint32]int), pendingDeletionLock: &sync.RWMutex{}, - pendingDeletionTimer: NewLockingTimerWheel[iputil.VpnIp](time.Millisecond*500, time.Second*60), + pendingDeletionTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, time.Second*60), checkInterval: checkInterval, pendingDeletionInterval: pendingDeletionInterval, l: l, @@ -53,41 +52,41 @@ func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface return nc } -func (n *connectionManager) In(ip iputil.VpnIp) { +func (n 
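[Editor note] Patch 22's premise: a vpn IP can briefly map to several live tunnels (for example after a handshake race), while a local index names exactly one, so the connection manager's tracking maps switch to uint32 keys. A minimal sketch of the two lookup tables; field names follow hostmap.go, the stub types are illustrative only:

```go
package main

import "fmt"

type VpnIp uint32 // stands in for iputil.VpnIp

type HostInfo struct { // stub: the real struct carries far more state
	vpnIp        VpnIp
	localIndexId uint32
}

type HostMap struct {
	Hosts   map[VpnIp]*HostInfo  // vpn ip -> one of possibly several tunnels
	Indexes map[uint32]*HostInfo // local index -> exactly one tunnel
}

func main() {
	a := &HostInfo{vpnIp: 1, localIndexId: 100}
	b := &HostInfo{vpnIp: 1, localIndexId: 101} // duplicate tunnel, same vpn ip

	hm := &HostMap{
		Hosts:   map[VpnIp]*HostInfo{a.vpnIp: a}, // can only point at one of them
		Indexes: map[uint32]*HostInfo{100: a, 101: b},
	}

	// Keying liveness tracking on the index keeps the duplicate visible.
	fmt.Println(hm.Indexes[101].localIndexId, hm.Hosts[1].localIndexId) // 101 100
}
```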
*connectionManager) In(localIndex uint32) { n.inLock.RLock() // If this already exists, return - if _, ok := n.in[ip]; ok { + if _, ok := n.in[localIndex]; ok { n.inLock.RUnlock() return } n.inLock.RUnlock() n.inLock.Lock() - n.in[ip] = struct{}{} + n.in[localIndex] = struct{}{} n.inLock.Unlock() } -func (n *connectionManager) Out(ip iputil.VpnIp) { +func (n *connectionManager) Out(localIndex uint32) { n.outLock.RLock() // If this already exists, return - if _, ok := n.out[ip]; ok { + if _, ok := n.out[localIndex]; ok { n.outLock.RUnlock() return } n.outLock.RUnlock() n.outLock.Lock() // double check since we dropped the lock temporarily - if _, ok := n.out[ip]; ok { + if _, ok := n.out[localIndex]; ok { n.outLock.Unlock() return } - n.out[ip] = struct{}{} - n.AddTrafficWatch(ip, n.checkInterval) + n.out[localIndex] = struct{}{} + n.AddTrafficWatch(localIndex, n.checkInterval) n.outLock.Unlock() } -func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool { +func (n *connectionManager) CheckIn(localIndex uint32) bool { n.inLock.RLock() - if _, ok := n.in[vpnIp]; ok { + if _, ok := n.in[localIndex]; ok { n.inLock.RUnlock() return true } @@ -95,35 +94,35 @@ func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool { return false } -func (n *connectionManager) ClearIP(ip iputil.VpnIp) { +func (n *connectionManager) ClearLocalIndex(localIndex uint32) { n.inLock.Lock() n.outLock.Lock() - delete(n.in, ip) - delete(n.out, ip) + delete(n.in, localIndex) + delete(n.out, localIndex) n.inLock.Unlock() n.outLock.Unlock() } -func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) { +func (n *connectionManager) ClearPendingDeletion(localIndex uint32) { n.pendingDeletionLock.Lock() - delete(n.pendingDeletion, ip) + delete(n.pendingDeletion, localIndex) n.pendingDeletionLock.Unlock() } -func (n *connectionManager) AddPendingDeletion(ip iputil.VpnIp) { +func (n *connectionManager) AddPendingDeletion(localIndex uint32) { n.pendingDeletionLock.Lock() - if _, ok := n.pendingDeletion[ip]; ok { - n.pendingDeletion[ip] += 1 + if _, ok := n.pendingDeletion[localIndex]; ok { + n.pendingDeletion[localIndex] += 1 } else { - n.pendingDeletion[ip] = 0 + n.pendingDeletion[localIndex] = 0 } - n.pendingDeletionTimer.Add(ip, time.Second*time.Duration(n.pendingDeletionInterval)) + n.pendingDeletionTimer.Add(localIndex, time.Second*time.Duration(n.pendingDeletionInterval)) n.pendingDeletionLock.Unlock() } -func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool { +func (n *connectionManager) checkPendingDeletion(localIndex uint32) bool { n.pendingDeletionLock.RLock() - if _, ok := n.pendingDeletion[ip]; ok { + if _, ok := n.pendingDeletion[localIndex]; ok { n.pendingDeletionLock.RUnlock() return true @@ -132,8 +131,8 @@ func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool { return false } -func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) { - n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds)) +func (n *connectionManager) AddTrafficWatch(localIndex uint32, seconds int) { + n.TrafficTimer.Add(localIndex, time.Second*time.Duration(seconds)) } func (n *connectionManager) Start(ctx context.Context) { @@ -162,23 +161,23 @@ func (n *connectionManager) Run(ctx context.Context) { func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) { n.TrafficTimer.Advance(now) for { - vpnIp, has := n.TrafficTimer.Purge() + localIndex, has := n.TrafficTimer.Purge() if !has { break } // Check for traffic coming back in from this host. 
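[Editor note] The Out method above shows the double-checked pattern this file leans on: an optimistic read lock for the common hit, then a re-check under the write lock because another goroutine can win the race in the unlocked gap. Distilled into a self-contained sketch:

```go
package main

import "sync"

type trackedSet struct {
	mu sync.RWMutex
	m  map[uint32]struct{}
}

// addOnce inserts k and runs onFirst exactly once per key (e.g. arming the
// traffic timer), no matter how many goroutines race on the same index.
func (s *trackedSet) addOnce(k uint32, onFirst func()) {
	s.mu.RLock()
	_, ok := s.m[k]
	s.mu.RUnlock()
	if ok {
		return // fast path: already tracked
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.m[k]; ok {
		return // double check: someone else added it while we were unlocked
	}
	s.m[k] = struct{}{}
	if onFirst != nil {
		onFirst()
	}
}

func main() {
	s := &trackedSet{m: map[uint32]struct{}{}}
	s.addOnce(42, func() {}) // slow path, runs onFirst
	s.addOnce(42, nil)       // fast-path no-op
}
```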
- traf := n.CheckIn(vpnIp) + traf := n.CheckIn(localIndex) - hostinfo, err := n.hostMap.QueryVpnIp(vpnIp) + hostinfo, err := n.hostMap.QueryIndex(localIndex) if err != nil { - n.l.Debugf("Not found in hostmap: %s", vpnIp) - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) + n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap") + n.ClearLocalIndex(localIndex) + n.ClearPendingDeletion(localIndex) continue } - if n.handleInvalidCertificate(now, vpnIp, hostinfo) { + if n.handleInvalidCertificate(now, hostinfo) { continue } @@ -186,12 +185,12 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) // expired, just ignore. if traf { if n.l.Level >= logrus.DebugLevel { - n.l.WithField("vpnIp", vpnIp). + hostinfo.logger(n.l). WithField("tunnelCheck", m{"state": "alive", "method": "passive"}). Debug("Tunnel status") } - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) + n.ClearLocalIndex(localIndex) + n.ClearPendingDeletion(localIndex) continue } @@ -201,12 +200,12 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) if hostinfo != nil && hostinfo.ConnectionState != nil { // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues - n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out) + n.intf.sendMessageToVpnIp(header.Test, header.TestRequest, hostinfo, p, nb, out) } else { - hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp) + hostinfo.logger(n.l).Debugf("Hostinfo sadness") } - n.AddPendingDeletion(vpnIp) + n.AddPendingDeletion(localIndex) } } @@ -214,63 +213,58 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) func (n *connectionManager) HandleDeletionTick(now time.Time) { n.pendingDeletionTimer.Advance(now) for { - vpnIp, has := n.pendingDeletionTimer.Purge() + localIndex, has := n.pendingDeletionTimer.Purge() if !has { break } - hostinfo, err := n.hostMap.QueryVpnIp(vpnIp) + hostinfo, err := n.hostMap.QueryIndex(localIndex) if err != nil { - n.l.Debugf("Not found in hostmap: %s", vpnIp) - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) + n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap") + n.ClearLocalIndex(localIndex) + n.ClearPendingDeletion(localIndex) continue } - if n.handleInvalidCertificate(now, vpnIp, hostinfo) { + if n.handleInvalidCertificate(now, hostinfo) { continue } // If we saw an incoming packets from this ip and peer's certificate is not // expired, just ignore. - traf := n.CheckIn(vpnIp) + traf := n.CheckIn(localIndex) if traf { - n.l.WithField("vpnIp", vpnIp). + hostinfo.logger(n.l). WithField("tunnelCheck", m{"state": "alive", "method": "active"}). Debug("Tunnel status") - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) + n.ClearLocalIndex(localIndex) + n.ClearPendingDeletion(localIndex) continue } // If it comes around on deletion wheel and hasn't resolved itself, delete - if n.checkPendingDeletion(vpnIp) { + if n.checkPendingDeletion(localIndex) { cn := "" if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil { cn = hostinfo.ConnectionState.peerCert.Details.Name } + hostinfo.logger(n.l). WithField("tunnelCheck", m{"state": "dead", "method": "active"}). WithField("certName", cn). Info("Tunnel status") - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) - // TODO: This is only here to let tests work. 
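[Editor note] For orientation, the two tick handlers above implement a two-phase liveness check. A distilled sketch using the real method names from this diff but hypothetical wrapper names (monitorOne/deletionOne) and eliding the probe send:

```go
// Phase 1 (traffic wheel): silence triggers a probe and arms deletion.
func (n *connectionManager) monitorOne(localIndex uint32) {
	if n.CheckIn(localIndex) { // inbound traffic seen: tunnel is alive
		n.ClearLocalIndex(localIndex)
		n.ClearPendingDeletion(localIndex)
		return
	}
	// Silent: a header.Test probe goes out here; a reply will flip the `in`
	// set before the deletion wheel fires.
	n.AddPendingDeletion(localIndex)
}

// Phase 2 (deletion wheel): still silent after the probe means eviction.
func (n *connectionManager) deletionOne(localIndex uint32, hostinfo *HostInfo) {
	if !n.CheckIn(localIndex) && n.checkPendingDeletion(localIndex) {
		n.hostMap.DeleteHostInfo(hostinfo) // dead tunnel: evict it
	}
	n.ClearLocalIndex(localIndex)
	n.ClearPendingDeletion(localIndex)
}
```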
Should do proper mocking - if n.intf.lightHouse != nil { - n.intf.lightHouse.DeleteVpnIp(vpnIp) - } n.hostMap.DeleteHostInfo(hostinfo) - } else { - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) } + + n.ClearLocalIndex(localIndex) + n.ClearPendingDeletion(localIndex) } } // handleInvalidCertificates will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid -func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool { +func (n *connectionManager) handleInvalidCertificate(now time.Time, hostinfo *HostInfo) bool { if !n.intf.disconnectInvalid { return false } @@ -286,8 +280,7 @@ func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil } fingerprint, _ := remoteCert.Sha256Sum() - n.l.WithField("vpnIp", vpnIp).WithError(err). - WithField("certName", remoteCert.Details.Name). + hostinfo.logger(n.l).WithError(err). WithField("fingerprint", fingerprint). Info("Remote certificate is no longer valid, tearing down the tunnel") @@ -295,7 +288,7 @@ func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil n.intf.sendCloseTunnel(hostinfo) n.intf.closeTunnel(hostinfo) - n.ClearIP(vpnIp) - n.ClearPendingDeletion(vpnIp) + n.ClearLocalIndex(hostinfo.localIndexId) + n.ClearPendingDeletion(hostinfo.localIndexId) return true } diff --git a/connection_manager_test.go b/connection_manager_test.go index df42800..58fdbcd 100644 --- a/connection_manager_test.go +++ b/connection_manager_test.go @@ -71,16 +71,22 @@ func Test_NewConnectionManagerTest(t *testing.T) { out := make([]byte, mtu) nc.HandleMonitorTick(now, p, nb, out) // Add an ip we have established a connection w/ to hostmap - hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil) + hostinfo := &HostInfo{ + vpnIp: vpnIp, + localIndexId: 1099, + remoteIndexId: 9901, + } hostinfo.ConnectionState = &ConnectionState{ certState: cs, H: &noise.HandshakeState{}, } + nc.hostMap.addHostInfo(hostinfo, ifce) // We saw traffic out to vpnIp - nc.Out(vpnIp) - assert.NotContains(t, nc.pendingDeletion, vpnIp) - assert.Contains(t, nc.hostMap.Hosts, vpnIp) + nc.Out(hostinfo.localIndexId) + assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) + assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) + assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) // Move ahead 5s. 
Nothing should happen next_tick := now.Add(5 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) @@ -90,16 +96,17 @@ func Test_NewConnectionManagerTest(t *testing.T) { nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // This host should now be up for deletion - assert.Contains(t, nc.pendingDeletion, vpnIp) - assert.Contains(t, nc.hostMap.Hosts, vpnIp) + assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId) + assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) + assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) // Move ahead some more next_tick = now.Add(45 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // The host should be evicted - assert.NotContains(t, nc.pendingDeletion, vpnIp) - assert.NotContains(t, nc.hostMap.Hosts, vpnIp) - + assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) + assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp) + assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId) } func Test_NewConnectionManagerTest2(t *testing.T) { @@ -140,14 +147,19 @@ func Test_NewConnectionManagerTest2(t *testing.T) { out := make([]byte, mtu) nc.HandleMonitorTick(now, p, nb, out) // Add an ip we have established a connection w/ to hostmap - hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil) + hostinfo := &HostInfo{ + vpnIp: vpnIp, + localIndexId: 1099, + remoteIndexId: 9901, + } hostinfo.ConnectionState = &ConnectionState{ certState: cs, H: &noise.HandshakeState{}, } + nc.hostMap.addHostInfo(hostinfo, ifce) // We saw traffic out to vpnIp - nc.Out(vpnIp) + nc.Out(hostinfo.localIndexId) assert.NotContains(t, nc.pendingDeletion, vpnIp) assert.Contains(t, nc.hostMap.Hosts, vpnIp) // Move ahead 5s. Nothing should happen @@ -159,18 +171,19 @@ func Test_NewConnectionManagerTest2(t *testing.T) { nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // This host should now be up for deletion - assert.Contains(t, nc.pendingDeletion, vpnIp) + assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Hosts, vpnIp) + assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) // We heard back this time - nc.In(vpnIp) + nc.In(hostinfo.localIndexId) // Move ahead some more next_tick = now.Add(45 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) - // The host should be evicted - assert.NotContains(t, nc.pendingDeletion, vpnIp) - assert.Contains(t, nc.hostMap.Hosts, vpnIp) - + // The host should not be evicted + assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) + assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) + assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) } // Check if we can disconnect the peer. @@ -257,13 +270,13 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) { // Check if to disconnect with invalid certificate. // Should be alive. nextTick := now.Add(45 * time.Second) - destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo) + destroyed := nc.handleInvalidCertificate(nextTick, hostinfo) assert.False(t, destroyed) // Move ahead 61s. // Check if to disconnect with invalid certificate. // Should be disconnected. 
nextTick = now.Add(61 * time.Second) - destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo) + destroyed = nc.handleInvalidCertificate(nextTick, hostinfo) assert.True(t, destroyed) } diff --git a/hostmap.go b/hostmap.go index 84b2041..372333e 100644 --- a/hostmap.go +++ b/hostmap.go @@ -764,7 +764,10 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry { return logrus.NewEntry(l) } - li := l.WithField("vpnIp", i.vpnIp) + li := l.WithField("vpnIp", i.vpnIp). + WithField("localIndex", i.localIndexId). + WithField("remoteIndex", i.remoteIndexId) + if connState := i.ConnectionState; connState != nil { if peerCert := connState.peerCert; peerCert != nil { li = li.WithField("certName", peerCert.Details.Name) diff --git a/inside.go b/inside.go index 5ed152c..38d9332 100644 --- a/inside.go +++ b/inside.go @@ -224,7 +224,7 @@ func (f *Interface) SendVia(viaIfc interface{}, c := via.ConnectionState.messageCounter.Add(1) out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c) - f.connectionManager.Out(via.vpnIp) + f.connectionManager.Out(via.localIndexId) // Authenticate the header and payload, but do not encrypt for this message type. // The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload. @@ -284,7 +284,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p) out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c) - f.connectionManager.Out(hostinfo.vpnIp) + f.connectionManager.Out(hostinfo.localIndexId) // Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against // all our IPs and enable a faster roaming. diff --git a/outside.go b/outside.go index f64815a..c43a385 100644 --- a/outside.go +++ b/outside.go @@ -84,7 +84,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by signedPayload = signedPayload[header.Len:] // Pull the Roaming parts up here, and return in all call paths. 
f.handleHostRoaming(hostinfo, addr) - f.connectionManager.In(hostinfo.vpnIp) + f.connectionManager.In(hostinfo.localIndexId) relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex) if !ok { @@ -237,14 +237,14 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []by f.handleHostRoaming(hostinfo, addr) - f.connectionManager.In(hostinfo.vpnIp) + f.connectionManager.In(hostinfo.localIndexId) } // closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote func (f *Interface) closeTunnel(hostInfo *HostInfo) { //TODO: this would be better as a single function in ConnectionManager that handled locks appropriately - f.connectionManager.ClearIP(hostInfo.vpnIp) - f.connectionManager.ClearPendingDeletion(hostInfo.vpnIp) + f.connectionManager.ClearLocalIndex(hostInfo.localIndexId) + f.connectionManager.ClearPendingDeletion(hostInfo.localIndexId) f.lightHouse.DeleteVpnIp(hostInfo.vpnIp) f.hostMap.DeleteHostInfo(hostInfo) @@ -405,7 +405,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out return } - f.connectionManager.In(hostinfo.vpnIp) + f.connectionManager.In(hostinfo.localIndexId) _, err = f.readers[q].Write(out) if err != nil { f.l.WithError(err).Error("Failed to write to tun") From 469ae7874890b8fd0f47e9448486b76d87a13909 Mon Sep 17 00:00:00 2001 From: Caleb Jasik Date: Mon, 13 Feb 2023 14:42:58 -0600 Subject: [PATCH 23/26] Add homebrew install method to readme (#630) --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index ca54606..ba4e997 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,11 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for $ sudo dnf install nebula ``` +- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb) + ``` + $ brew install nebula + ``` + #### Mobile - [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200) From 2ea360e5e25d77b969b5b4f5e6a5c1a2affec843 Mon Sep 17 00:00:00 2001 From: Nate Brown Date: Thu, 16 Feb 2023 13:23:33 -0600 Subject: [PATCH 24/26] Render hostmaps as mermaid graphs in e2e tests (#815) --- control_tester.go | 12 ++++- e2e/handshakes_test.go | 4 ++ e2e/router/hostmap.go | 109 +++++++++++++++++++++++++++++++++++++++++ e2e/router/router.go | 88 +++++++++++++++++++++++++++++++-- hostmap_tester.go | 24 +++++++++ 5 files changed, 233 insertions(+), 4 deletions(-) create mode 100644 e2e/router/hostmap.go create mode 100644 hostmap_tester.go diff --git a/control_tester.go b/control_tester.go index 7852943..4fa0763 100644 --- a/control_tester.go +++ b/control_tester.go @@ -6,6 +6,8 @@ package nebula import ( "net" + "github.com/slackhq/nebula/cert" + "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/slackhq/nebula/header" @@ -14,7 +16,7 @@ import ( "github.com/slackhq/nebula/udp" ) -// WaitForTypeByIndex will pipe all messages from this control device into the pipeTo control device +// WaitForType will pipe all messages from this control device into the pipeTo control device // returning after a message matching the criteria has been piped func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) { h := &header.H{} @@ -153,3 +155,11 @@ func (c *Control) KillPendingTunnel(vpnIp net.IP) bool { c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo) return true } + +func (c *Control) GetHostmap() *HostMap { + return c.f.hostMap +} + +func (c 
*Control) GetCert() *cert.NebulaCertificate { + return c.f.certState.certificate +} diff --git a/e2e/handshakes_test.go b/e2e/handshakes_test.go index b92d7e0..bfde43e 100644 --- a/e2e/handshakes_test.go +++ b/e2e/handshakes_test.go @@ -85,6 +85,7 @@ func TestGoodHandshake(t *testing.T) { defer r.RenderFlow() assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) + r.RenderHostmaps("Final hostmaps", myControl, theirControl) myControl.Stop() theirControl.Stop() //TODO: assert hostmaps @@ -150,6 +151,7 @@ func TestWrongResponderHandshake(t *testing.T) { //NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete //TODO: assert hostmaps for everyone + r.RenderHostmaps("Final hostmaps", myControl, theirControl, evilControl) t.Log("Success!") myControl.Stop() theirControl.Stop() @@ -205,6 +207,7 @@ func Test_Case1_Stage1Race(t *testing.T) { t.Log("Do a bidirectional tunnel test") assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) + r.RenderHostmaps("Final hostmaps", myControl, theirControl) myControl.Stop() theirControl.Stop() //TODO: assert hostmaps @@ -235,6 +238,7 @@ func TestRelays(t *testing.T) { p := r.RouteForAllUntilTxTun(theirControl) assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80) + r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl) //TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it } diff --git a/e2e/router/hostmap.go b/e2e/router/hostmap.go new file mode 100644 index 0000000..948281a --- /dev/null +++ b/e2e/router/hostmap.go @@ -0,0 +1,109 @@ +//go:build e2e_testing +// +build e2e_testing + +package router + +import ( + "fmt" + "strings" + + "github.com/slackhq/nebula" +) + +type edge struct { + from string + to string + dual bool +} + +func renderHostmaps(controls ...*nebula.Control) string { + var lines []*edge + r := "graph TB\n" + for _, c := range controls { + sr, se := renderHostmap(c) + r += sr + for _, e := range se { + add := true + + // Collapse duplicate edges into a bi-directionally connected edge + for _, ge := range lines { + if e.to == ge.from && e.from == ge.to { + add = false + ge.dual = true + break + } + } + + if add { + lines = append(lines, e) + } + } + } + + for _, line := range lines { + if line.dual { + r += fmt.Sprintf("\t%v <--> %v\n", line.from, line.to) + } else { + r += fmt.Sprintf("\t%v --> %v\n", line.from, line.to) + } + + } + + return r +} + +func renderHostmap(c *nebula.Control) (string, []*edge) { + var lines []string + var globalLines []*edge + + clusterName := strings.Trim(c.GetCert().Details.Name, " ") + clusterVpnIp := c.GetCert().Details.Ips[0].IP + r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp) + + hm := c.GetHostmap() + + // Draw the vpn to index nodes + r += fmt.Sprintf("\t\tsubgraph %s.hosts[\"Hosts (vpn ip to index)\"]\n", clusterName) + for vpnIp, hi := range hm.Hosts { + r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, vpnIp, vpnIp) + lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, hi.GetLocalIndex())) + + rs := hi.GetRelayState() + for _, relayIp := range rs.CopyRelayIps() { + lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp)) + } + + for _, relayIp := range rs.CopyRelayForIdxs() { + lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, relayIp)) + } + } + r += 
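[Editor note] Patch 24's renderer (above, continuing below) emits Mermaid: one subgraph per node, vpn-ip-to-index edges inside it, and cross-node index edges collapsed to `<-->` when both directions exist. Illustrative output for two hosts `me`/`them` with one established tunnel and made-up indexes 100/200:

```mermaid
graph TB
	subgraph me["me (10.0.0.1)"]
		subgraph me.hosts["Hosts (vpn ip to index)"]
			me.10.0.0.2["10.0.0.2"]
		end
		subgraph indexes.me["Indexes (index to hostinfo)"]
			me.100["100 (10.0.0.2)"]
		end
		me.10.0.0.2 --> me.100
	end
	subgraph them["them (10.0.0.2)"]
		subgraph them.hosts["Hosts (vpn ip to index)"]
			them.10.0.0.1["10.0.0.1"]
		end
		subgraph indexes.them["Indexes (index to hostinfo)"]
			them.200["200 (10.0.0.1)"]
		end
		them.10.0.0.1 --> them.200
	end
	me.100 <--> them.200
```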
"\t\tend\n" + + // Draw the relay hostinfos + if len(hm.Relays) > 0 { + r += fmt.Sprintf("\t\tsubgraph %s.relays[\"Relays (relay index to hostinfo)\"]\n", clusterName) + for relayIndex, hi := range hm.Relays { + r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, relayIndex, relayIndex) + lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, relayIndex, clusterName, hi.GetLocalIndex())) + } + r += "\t\tend\n" + } + + // Draw the local index to relay or remote index nodes + r += fmt.Sprintf("\t\tsubgraph indexes.%s[\"Indexes (index to hostinfo)\"]\n", clusterName) + for idx, hi := range hm.Indexes { + r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp()) + remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ") + globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())}) + _ = hi + } + r += "\t\tend\n" + + // Add the edges inside this host + for _, line := range lines { + r += fmt.Sprintf("\t\t%v\n", line) + } + + r += "\tend\n" + return r, globalLines +} diff --git a/e2e/router/router.go b/e2e/router/router.go index 7b916a0..aa56db8 100644 --- a/e2e/router/router.go +++ b/e2e/router/router.go @@ -40,7 +40,12 @@ type R struct { // A map of vpn ip to the nebula control it belongs to vpnControls map[iputil.VpnIp]*nebula.Control - flow []flowEntry + ignoreFlows []ignoreFlow + flow []flowEntry + + // A set of additional mermaid graphs to draw in the flow log markdown file + // Currently consisting only of hostmap renders + additionalGraphs []mermaidGraph // All interactions are locked to help serialize behavior sync.Mutex @@ -50,6 +55,24 @@ type R struct { t testing.TB } +type ignoreFlow struct { + tun NullBool + messageType header.MessageType + subType header.MessageSubType + //from + //to +} + +type mermaidGraph struct { + title string + content string +} + +type NullBool struct { + HasValue bool + IsTrue bool +} + type flowEntry struct { note string packet *packet @@ -98,6 +121,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R { inNat: make(map[string]*nebula.Control), outNat: make(map[string]net.UDPAddr), flow: []flowEntry{}, + ignoreFlows: []ignoreFlow{}, fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())), t: t, cancelRender: cancel, @@ -219,15 +243,55 @@ func (r *R) renderFlow() { } fmt.Fprintf(f, - " %s%s%s: %s(%s), counter: %v\n", + " %s%s%s: %s(%s), index %v, counter: %v\n", strings.Replace(p.from.GetUDPAddr(), ":", "#58;", 1), line, strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1), - h.TypeName(), h.SubTypeName(), h.MessageCounter, + h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter, ) } } fmt.Fprintln(f, "```") + + for _, g := range r.additionalGraphs { + fmt.Fprintf(f, "## %s\n", g.title) + fmt.Fprintln(f, "```mermaid") + fmt.Fprintln(f, g.content) + fmt.Fprintln(f, "```") + } +} + +// IgnoreFlow tells the router to stop recording future flows that matches the provided criteria. 
+// messageType and subType will target nebula underlay packets while tun will target nebula overlay packets +// NOTE: This is a very broad system, if you set tun to true then no more tun traffic will be rendered +func (r *R) IgnoreFlow(messageType header.MessageType, subType header.MessageSubType, tun NullBool) { + r.Lock() + defer r.Unlock() + r.ignoreFlows = append(r.ignoreFlows, ignoreFlow{ + tun, + messageType, + subType, + }) +} + +func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) { + r.Lock() + defer r.Unlock() + + s := renderHostmaps(controls...) + if len(r.additionalGraphs) > 0 { + lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1] + if lastGraph.content == s && lastGraph.title == title { + // Ignore this rendering if it matches the last rendering added + // This is useful if you want to track rendering changes + return + } + } + + r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{ + title: title, + content: s, + }) } // InjectFlow can be used to record packet flow if the test is handling the routing on its own. @@ -268,6 +332,24 @@ func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool return nil } + if len(r.ignoreFlows) > 0 { + var h header.H + err := h.Parse(p.Data) + if err != nil { + panic(err) + } + + for _, i := range r.ignoreFlows { + if !tun { + if i.messageType == h.Type && i.subType == h.Subtype { + return nil + } + } else if i.tun.HasValue && i.tun.IsTrue { + return nil + } + } + } + fp := &packet{ from: from, to: to, diff --git a/hostmap_tester.go b/hostmap_tester.go new file mode 100644 index 0000000..1d4323f --- /dev/null +++ b/hostmap_tester.go @@ -0,0 +1,24 @@ +//go:build e2e_testing +// +build e2e_testing + +package nebula + +// This file contains functions used to export information to the e2e testing framework + +import "github.com/slackhq/nebula/iputil" + +func (i *HostInfo) GetVpnIp() iputil.VpnIp { + return i.vpnIp +} + +func (i *HostInfo) GetLocalIndex() uint32 { + return i.localIndexId +} + +func (i *HostInfo) GetRemoteIndex() uint32 { + return i.remoteIndexId +} + +func (i *HostInfo) GetRelayState() RelayState { + return i.relayState +} From 92cc32f84403d5c4fbbd07404be1b009260b70b0 Mon Sep 17 00:00:00 2001 From: Nate Brown Date: Mon, 13 Mar 2023 12:35:14 -0500 Subject: [PATCH 25/26] Remove handshake race avoidance (#820) Co-authored-by: Wade Simmons --- connection_manager.go | 24 ++- connection_manager_test.go | 4 +- control.go | 32 +++- e2e/handshakes_test.go | 298 ++++++++++++++++++++++++++++++------- e2e/helpers_test.go | 8 +- e2e/router/hostmap.go | 34 ++++- e2e/router/router.go | 34 ++++- go.mod | 1 + go.sum | 2 + handshake_ix.go | 28 ++-- handshake_manager.go | 75 ++++------ hostmap.go | 122 ++++++++++++--- hostmap_test.go | 206 +++++++++++++++++++++++++ outside.go | 8 +- overlay/tun_tester.go | 2 +- relay_manager.go | 5 + ssh.go | 15 +- udp/udp_tester.go | 2 +- 18 files changed, 742 insertions(+), 158 deletions(-) diff --git a/connection_manager.go b/connection_manager.go index 8135421..d1e78ca 100644 --- a/connection_manager.go +++ b/connection_manager.go @@ -181,6 +181,14 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) continue } + // Does the vpnIp point to this hostinfo or is it ancillary? 
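[Editor note] Hypothetical use of the new flow filters in an e2e test, given the tri-state NullBool above (zero value = no opinion about tun traffic); the test name and elided setup are illustrative:

```go
func TestQuietFlowLog(t *testing.T) { // hypothetical test
	// ... control setup elided ...
	r := router.NewR(t, myControl, theirControl)
	defer r.RenderFlow()

	// Stop recording underlay Test/TestRequest probes (connection-manager noise).
	r.IgnoreFlow(header.Test, header.TestRequest, router.NullBool{})

	// Stop recording every overlay (tun) packet. As the doc comment warns,
	// this is broad: per the matcher above, this pair also drops underlay
	// packets whose type and subtype are both zero.
	r.IgnoreFlow(0, 0, router.NullBool{HasValue: true, IsTrue: true})
}
```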
If we have ancillary hostinfos then we need to + // decide if this should be the main hostinfo if we are seeing traffic on it + primary, _ := n.hostMap.QueryVpnIp(hostinfo.vpnIp) + mainHostInfo := true + if primary != nil && primary != hostinfo { + mainHostInfo = false + } + // If we saw an incoming packets from this ip and peer's certificate is not // expired, just ignore. if traf { @@ -191,6 +199,20 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) } n.ClearLocalIndex(localIndex) n.ClearPendingDeletion(localIndex) + + if !mainHostInfo { + if hostinfo.vpnIp > n.intf.myVpnIp { + // We are receiving traffic on the non primary hostinfo and we really just want 1 tunnel. Make + // This the primary and prime the old primary hostinfo for testing + n.hostMap.MakePrimary(hostinfo) + n.Out(primary.localIndexId) + } else { + // This hostinfo is still being used despite not being the primary hostinfo for this vpn ip + // Keep tracking so that we can tear it down when it goes away + n.Out(hostinfo.localIndexId) + } + } + continue } @@ -198,7 +220,7 @@ func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) WithField("tunnelCheck", m{"state": "testing", "method": "active"}). Debug("Tunnel status") - if hostinfo != nil && hostinfo.ConnectionState != nil { + if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo { // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues n.intf.sendMessageToVpnIp(header.Test, header.TestRequest, hostinfo, p, nb, out) diff --git a/connection_manager_test.go b/connection_manager_test.go index 58fdbcd..51e331b 100644 --- a/connection_manager_test.go +++ b/connection_manager_test.go @@ -80,7 +80,7 @@ func Test_NewConnectionManagerTest(t *testing.T) { certState: cs, H: &noise.HandshakeState{}, } - nc.hostMap.addHostInfo(hostinfo, ifce) + nc.hostMap.unlockedAddHostInfo(hostinfo, ifce) // We saw traffic out to vpnIp nc.Out(hostinfo.localIndexId) @@ -156,7 +156,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) { certState: cs, H: &noise.HandshakeState{}, } - nc.hostMap.addHostInfo(hostinfo, ifce) + nc.hostMap.unlockedAddHostInfo(hostinfo, ifce) // We saw traffic out to vpnIp nc.Out(hostinfo.localIndexId) diff --git a/control.go b/control.go index adc2a48..ab3a5cb 100644 --- a/control.go +++ b/control.go @@ -95,12 +95,21 @@ func (c *Control) RebindUDPServer() { c.f.rebindCount++ } -// ListHostmap returns details about the actual or pending (handshaking) hostmap -func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo { +// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip +func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo { if pendingMap { - return listHostMap(c.f.handshakeManager.pendingHostMap) + return listHostMapHosts(c.f.handshakeManager.pendingHostMap) } else { - return listHostMap(c.f.hostMap) + return listHostMapHosts(c.f.hostMap) + } +} + +// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id +func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo { + if pendingMap { + return listHostMapIndexes(c.f.handshakeManager.pendingHostMap) + } else { + return listHostMapIndexes(c.f.hostMap) } } @@ -232,7 +241,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo { return chi } -func listHostMap(hm *HostMap) []ControlHostInfo { +func listHostMapHosts(hm *HostMap) []ControlHostInfo { 
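[Editor note] The ancillary-hostinfo logic above is patch 25's replacement for handshake race avoidance: both peers apply the same vpn-IP comparison, which flips between the two ends, so exactly one side promotes the traffic-bearing duplicate while the other merely keeps testing it until it dies off. The decision, distilled into a hypothetical helper (the diff inlines this):

```go
// shouldPromote reports whether this node should make the traffic-bearing
// duplicate the primary tunnel for its vpn ip.
func shouldPromote(trafficBearing, primary *HostInfo, myVpnIp iputil.VpnIp) bool {
	if primary == nil || trafficBearing == primary {
		return false // already the main tunnel for this vpn ip
	}
	// Comparing the peer's vpn ip against our own yields opposite answers on
	// the two ends, so only one side promotes and both converge on one tunnel.
	return trafficBearing.vpnIp > myVpnIp
}
```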
hm.RLock() hosts := make([]ControlHostInfo, len(hm.Hosts)) i := 0 @@ -244,3 +253,16 @@ func listHostMap(hm *HostMap) []ControlHostInfo { return hosts } + +func listHostMapIndexes(hm *HostMap) []ControlHostInfo { + hm.RLock() + hosts := make([]ControlHostInfo, len(hm.Indexes)) + i := 0 + for _, v := range hm.Indexes { + hosts[i] = copyHostInfo(v, hm.preferredRanges) + i++ + } + hm.RUnlock() + + return hosts +} diff --git a/e2e/handshakes_test.go b/e2e/handshakes_test.go index bfde43e..d12412e 100644 --- a/e2e/handshakes_test.go +++ b/e2e/handshakes_test.go @@ -19,10 +19,10 @@ import ( func BenchmarkHotPath(b *testing.B) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) myControl, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) - theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) // Put their info in our lighthouse - myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) // Start the servers myControl.Start() @@ -32,7 +32,7 @@ func BenchmarkHotPath(b *testing.B) { r.CancelFlowLogs() for n := 0; n < b.N; n++ { - myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) _ = r.RouteForAllUntilTxTun(theirControl) } @@ -42,18 +42,18 @@ func BenchmarkHotPath(b *testing.B) { func TestGoodHandshake(t *testing.T) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) - myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) - theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) + myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) // Put their info in our lighthouse - myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) // Start the servers myControl.Start() theirControl.Start() t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side") - myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) t.Log("Have them consume my stage 0 packet. 
They have a tunnel now") theirControl.InjectUDPPacket(myControl.GetFromUDP(true)) @@ -74,16 +74,16 @@ func TestGoodHandshake(t *testing.T) { myControl.WaitForType(1, 0, theirControl) t.Log("Make sure our host infos are correct") - assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl) + assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl) t.Log("Get that cached packet and make sure it looks right") myCachedPacket := theirControl.GetFromTun(true) - assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) + assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) t.Log("Do a bidirectional tunnel test") r := router.NewR(t, myControl, theirControl) defer r.RenderFlow() - assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) r.RenderHostmaps("Final hostmaps", myControl, theirControl) myControl.Stop() @@ -97,15 +97,15 @@ func TestWrongResponderHandshake(t *testing.T) { // The IPs here are chosen on purpose: // The current remote handling will sort by preference, public, and then lexically. // So we need them to have a higher address than evil (we could apply a preference though) - myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil) - theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil) + myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil) evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil) // Add their real udp addr, which should be tried after evil. - myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) // Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse. 
- myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr) + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr) // Build a router so we don't have to reason who gets which packet r := router.NewR(t, myControl, theirControl, evilControl) @@ -117,7 +117,7 @@ func TestWrongResponderHandshake(t *testing.T) { evilControl.Start() t.Log("Start the handshake process, we will route until we see our cached packet get sent to them") - myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType { h := &header.H{} err := h.Parse(p.Data) @@ -136,18 +136,18 @@ func TestWrongResponderHandshake(t *testing.T) { t.Log("My cached packet should be received by them") myCachedPacket := theirControl.GetFromTun(true) - assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) + assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) t.Log("Test the tunnel with them") - assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl) - assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) + assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl) + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) t.Log("Flush all packets from all controllers") r.FlushAll() t.Log("Ensure ensure I don't have any hostinfo artifacts from evil") - assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil") - assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil") + assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil") + assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil") //NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete //TODO: assert hostmaps for everyone @@ -157,14 +157,17 @@ func TestWrongResponderHandshake(t *testing.T) { theirControl.Stop() } -func Test_Case1_Stage1Race(t *testing.T) { +func TestStage1Race(t *testing.T) { + // This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow + // But will eventually collapse down to a single tunnel + ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) - myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) - theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) + myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) // Put their info in our lighthouse and vice versa - myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) - theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr) + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) + theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) // Build a router so we don't have to reason who gets which packet r := router.NewR(t, myControl, theirControl) @@ -175,8 +178,8 @@ 
func Test_Case1_Stage1Race(t *testing.T) { theirControl.Start() t.Log("Trigger a handshake to start on both me and them") - myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) - theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them")) + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) + theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them")) t.Log("Get both stage 1 handshake packets") myHsForThem := myControl.GetFromUDP(true) @@ -185,44 +188,165 @@ func Test_Case1_Stage1Race(t *testing.T) { r.Log("Now inject both stage 1 handshake packets") r.InjectUDPPacket(theirControl, myControl, theirHsForMe) r.InjectUDPPacket(myControl, theirControl, myHsForThem) - //TODO: they should win, grab their index for me and make sure I use it in the end. - r.Log("They should not have a stage 2 (won the race) but I should send one") - r.InjectUDPPacket(myControl, theirControl, myControl.GetFromUDP(true)) + r.Log("Route until they receive a message packet") + myCachedPacket := r.RouteForAllUntilTxTun(theirControl) + assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) - r.Log("Route for me until I send a message packet to them") - r.RouteForAllUntilAfterMsgTypeTo(theirControl, header.Message, header.MessageNone) + r.Log("Their cached packet should be received by me") + theirCachedPacket := r.RouteForAllUntilTxTun(myControl) + assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80) - t.Log("My cached packet should be received by them") - myCachedPacket := theirControl.GetFromTun(true) - assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) + r.Log("Do a bidirectional tunnel test") + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) - t.Log("Route for them until I send a message packet to me") - theirControl.WaitForType(1, 0, myControl) + myHostmapHosts := myControl.ListHostmapHosts(false) + myHostmapIndexes := myControl.ListHostmapIndexes(false) + theirHostmapHosts := theirControl.ListHostmapHosts(false) + theirHostmapIndexes := theirControl.ListHostmapIndexes(false) - t.Log("Their cached packet should be received by me") - theirCachedPacket := myControl.GetFromTun(true) - assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80) + // We should have two tunnels on both sides + assert.Len(t, myHostmapHosts, 1) + assert.Len(t, theirHostmapHosts, 1) + assert.Len(t, myHostmapIndexes, 2) + assert.Len(t, theirHostmapIndexes, 2) - t.Log("Do a bidirectional tunnel test") - assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) + r.RenderHostmaps("Starting hostmaps", myControl, theirControl) + + r.Log("Spin until connection manager tears down a tunnel") + + for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 { + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) + t.Log("Connection manager hasn't ticked yet") + time.Sleep(time.Second) + } + + myFinalHostmapHosts := myControl.ListHostmapHosts(false) + myFinalHostmapIndexes := myControl.ListHostmapIndexes(false) + theirFinalHostmapHosts := theirControl.ListHostmapHosts(false) + theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false) + + // We should only have a single tunnel now on both sides + assert.Len(t, myFinalHostmapHosts, 1) + assert.Len(t, theirFinalHostmapHosts, 1) + assert.Len(t, myFinalHostmapIndexes, 1) + assert.Len(t, 
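[Editor note] The spin loop above waits for the connection managers to collapse the duplicate tunnel while proving traffic still flows. The same idea as a hypothetical reusable helper (the 30s cap is an assumption; the real tests also assert the tunnel each iteration):

```go
func waitForIndexCount(t *testing.T, want int, controls ...*nebula.Control) {
	deadline := time.Now().Add(30 * time.Second)
	for {
		total := 0
		for _, c := range controls {
			total += len(c.GetHostmap().Indexes)
		}
		if total <= want {
			return // the duplicate tunnels have been torn down
		}
		if time.Now().After(deadline) {
			t.Fatalf("still %d hostmap indexes, want <= %d", total, want)
		}
		time.Sleep(time.Second) // connection manager ticks on its own clock
	}
}
```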
theirFinalHostmapIndexes, 1) r.RenderHostmaps("Final hostmaps", myControl, theirControl) myControl.Stop() theirControl.Stop() - //TODO: assert hostmaps +} + +func TestUncleanShutdownRaceLoser(t *testing.T) { + ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) + myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) + + // Teach my how to get to the relay and that their can be reached via the relay + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) + theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) + + // Build a router so we don't have to reason who gets which packet + r := router.NewR(t, myControl, theirControl) + defer r.RenderFlow() + + // Start the servers + myControl.Start() + theirControl.Start() + + r.Log("Trigger a handshake from me to them") + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) + + p := r.RouteForAllUntilTxTun(theirControl) + assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) + + r.Log("Nuke my hostmap") + myHostmap := myControl.GetHostmap() + myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{} + myHostmap.Indexes = map[uint32]*nebula.HostInfo{} + myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{} + + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again")) + p = r.RouteForAllUntilTxTun(theirControl) + assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) + + r.Log("Assert the tunnel works") + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) + + r.Log("Wait for the dead index to go away") + start := len(theirControl.GetHostmap().Indexes) + for { + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) + if len(theirControl.GetHostmap().Indexes) < start { + break + } + time.Sleep(time.Second) + } + + r.RenderHostmaps("Final hostmaps", myControl, theirControl) +} + +func TestUncleanShutdownRaceWinner(t *testing.T) { + ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) + myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) + + // Teach my how to get to the relay and that their can be reached via the relay + myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) + theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) + + // Build a router so we don't have to reason who gets which packet + r := router.NewR(t, myControl, theirControl) + defer r.RenderFlow() + + // Start the servers + myControl.Start() + theirControl.Start() + + r.Log("Trigger a handshake from me to them") + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) + + p := r.RouteForAllUntilTxTun(theirControl) + assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) + r.RenderHostmaps("Final hostmaps", myControl, theirControl) + + r.Log("Nuke my hostmap") + theirHostmap := theirControl.GetHostmap() + theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{} + theirHostmap.Indexes = map[uint32]*nebula.HostInfo{} + theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{} + + 
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again")) + p = r.RouteForAllUntilTxTun(myControl) + assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80) + r.RenderHostmaps("Derp hostmaps", myControl, theirControl) + + r.Log("Assert the tunnel works") + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) + + r.Log("Wait for the dead index to go away") + start := len(myControl.GetHostmap().Indexes) + for { + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) + if len(myControl.GetHostmap().Indexes) < start { + break + } + time.Sleep(time.Second) + } + + r.RenderHostmaps("Final hostmaps", myControl, theirControl) } func TestRelays(t *testing.T) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) - myControl, myVpnIp, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) - relayControl, relayVpnIp, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) - theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) + myControl, myVpnIpNet, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) + relayControl, relayVpnIpNet, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) + theirControl, theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) // Teach my how to get to the relay and that their can be reached via the relay - myControl.InjectLightHouseAddr(relayVpnIp, relayUdpAddr) - myControl.InjectRelays(theirVpnIp, []net.IP{relayVpnIp}) - relayControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) + myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) + myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) + relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) // Build a router so we don't have to reason who gets which packet r := router.NewR(t, myControl, relayControl, theirControl) @@ -234,12 +358,84 @@ func TestRelays(t *testing.T) { theirControl.Start() t.Log("Trigger a handshake from me to them via the relay") - myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) + myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) p := r.RouteForAllUntilTxTun(theirControl) - assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80) + r.Log("Assert the tunnel works") + assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl) //TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it } +func TestStage1RaceRelays(t *testing.T) { + //NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay + ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) + myControl, myVpnIpNet, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) + relayControl, relayVpnIpNet, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) + theirControl, 
theirVpnIpNet, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) + + // Teach my how to get to the relay and that their can be reached via the relay + myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) + theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) + + myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) + theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) + + relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) + relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) + + // Build a router so we don't have to reason who gets which packet + r := router.NewR(t, myControl, relayControl, theirControl) + defer r.RenderFlow() + + // Start the servers + myControl.Start() + relayControl.Start() + theirControl.Start() + + r.Log("Trigger a handshake to start on both me and relay") + myControl.InjectTunUDPPacket(relayVpnIpNet.IP, 80, 80, []byte("Hi from me")) + relayControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from relay")) + + r.Log("Get both stage 1 handshake packets") + //TODO: this is where it breaks, we need to get the hs packets for the relay not for the destination + myHsForThem := myControl.GetFromUDP(true) + relayHsForMe := relayControl.GetFromUDP(true) + + r.Log("Now inject both stage 1 handshake packets") + r.InjectUDPPacket(relayControl, myControl, relayHsForMe) + r.InjectUDPPacket(myControl, relayControl, myHsForThem) + + r.Log("Route for me until I send a message packet to relay") + r.RouteForAllUntilAfterMsgTypeTo(relayControl, header.Message, header.MessageNone) + + r.Log("My cached packet should be received by relay") + myCachedPacket := relayControl.GetFromTun(true) + assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, relayVpnIpNet.IP, 80, 80) + + r.Log("Relays cached packet should be received by me") + relayCachedPacket := r.RouteForAllUntilTxTun(myControl) + assertUdpPacket(t, []byte("Hi from relay"), relayCachedPacket, relayVpnIpNet.IP, myVpnIpNet.IP, 80, 80) + + r.Log("Do a bidirectional tunnel test; me and relay") + assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r) + + r.Log("Create a tunnel between relay and them") + assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r) + + r.RenderHostmaps("Starting hostmaps", myControl, relayControl, theirControl) + + r.Log("Trigger a handshake to start from me to them via the relay") + //TODO: if we initiate a handshake from me and then assert the tunnel it will cause a relay control race that can blow up + // this is a problem that exists on master today + //myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) + assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) + + myControl.Stop() + theirControl.Stop() + relayControl.Stop() + // + ////TODO: assert hostmaps +} + //TODO: add a test with many lies diff --git a/e2e/helpers_test.go b/e2e/helpers_test.go index a378bea..3a2d7b5 100644 --- a/e2e/helpers_test.go +++ b/e2e/helpers_test.go @@ -30,7 +30,7 @@ import ( type m map[string]interface{} // newSimpleServer creates a nebula instance with many assumptions -func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, net.IP, *net.UDPAddr) { +func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr) { l := 
diff --git a/e2e/helpers_test.go b/e2e/helpers_test.go
index a378bea..3a2d7b5 100644
--- a/e2e/helpers_test.go
+++ b/e2e/helpers_test.go
@@ -30,7 +30,7 @@ import (
 type m map[string]interface{}
 
 // newSimpleServer creates a nebula instance with many assumptions
-func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, net.IP, *net.UDPAddr) {
+func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr) {
 	l := NewTestLogger()
 
 	vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
@@ -101,7 +101,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
 		panic(err)
 	}
 
-	return control, vpnIpNet.IP, &udpAddr
+	return control, vpnIpNet, &udpAddr
 }
 
 // newTestCaCert will generate a CA cert
@@ -231,12 +231,12 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
 func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
 	// Send a packet from them to me
 	controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
-	bPacket := r.RouteUntilTxTun(controlB, controlA)
+	bPacket := r.RouteForAllUntilTxTun(controlA)
 	assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
 
 	// And once more from me to them
 	controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
-	aPacket := r.RouteUntilTxTun(controlA, controlB)
+	aPacket := r.RouteForAllUntilTxTun(controlB)
 	assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
 }
 
diff --git a/e2e/router/hostmap.go b/e2e/router/hostmap.go
index 948281a..10627fc 100644
--- a/e2e/router/hostmap.go
+++ b/e2e/router/hostmap.go
@@ -5,9 +5,11 @@ package router
 
 import (
 	"fmt"
+	"sort"
 	"strings"
 
 	"github.com/slackhq/nebula"
+	"github.com/slackhq/nebula/iputil"
 )
 
 type edge struct {
@@ -64,7 +66,8 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
 
 	// Draw the vpn to index nodes
 	r += fmt.Sprintf("\t\tsubgraph %s.hosts[\"Hosts (vpn ip to index)\"]\n", clusterName)
-	for vpnIp, hi := range hm.Hosts {
+	for _, vpnIp := range sortedHosts(hm.Hosts) {
+		hi := hm.Hosts[vpnIp]
 		r += fmt.Sprintf("\t\t\t%v.%v[\"%v\"]\n", clusterName, vpnIp, vpnIp)
 		lines = append(lines, fmt.Sprintf("%v.%v --> %v.%v", clusterName, vpnIp, clusterName, hi.GetLocalIndex()))
 
@@ -91,7 +94,8 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
 
 	// Draw the local index to relay or remote index nodes
 	r += fmt.Sprintf("\t\tsubgraph indexes.%s[\"Indexes (index to hostinfo)\"]\n", clusterName)
-	for idx, hi := range hm.Indexes {
+	for _, idx := range sortedIndexes(hm.Indexes) {
+		hi := hm.Indexes[idx]
 		r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
 		remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
 		globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
@@ -107,3 +111,29 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
 	r += "\tend\n"
 	return r, globalLines
 }
+
+func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
+	keys := make([]iputil.VpnIp, 0, len(hosts))
+	for key := range hosts {
+		keys = append(keys, key)
+	}
+
+	sort.SliceStable(keys, func(i, j int) bool {
+		return keys[i] > keys[j]
+	})
+
+	return keys
+}
+
+func sortedIndexes(indexes map[uint32]*nebula.HostInfo) []uint32 {
+	keys := make([]uint32, 0, len(indexes))
+	for key := range indexes {
+		keys = append(keys, key)
+	}
+
+	sort.SliceStable(keys, func(i, j int) bool {
+		return keys[i] > keys[j]
+	})
+
+	return keys
+}
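The sortedHosts/sortedIndexes helpers above exist because Go deliberately randomizes map iteration order; rendering hm.Hosts directly would produce a different Mermaid graph on every run, making renders impossible to diff between clock ticks. A minimal standalone sketch of the same pattern (the names here are illustrative, not from the patch):

package main

import (
	"fmt"
	"sort"
)

// render walks a map in a deterministic order by sorting its keys first.
// Without the sort, Go's randomized map iteration would reorder the output
// on every call, which makes rendered graphs impossible to compare.
func render(hosts map[uint32]string) string {
	keys := make([]uint32, 0, len(hosts))
	for k := range hosts {
		keys = append(keys, k)
	}

	// Descending, mirroring the comparator used by sortedHosts/sortedIndexes
	sort.Slice(keys, func(i, j int) bool { return keys[i] > keys[j] })

	out := ""
	for _, k := range keys {
		out += fmt.Sprintf("%d -> %s\n", k, hosts[k])
	}
	return out
}

func main() {
	m := map[uint32]string{1: "a", 2: "b", 3: "c"}
	fmt.Print(render(m)) // always prints 3, 2, 1 in that order
}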
diff --git a/e2e/router/router.go b/e2e/router/router.go
index aa56db8..98bb31d 100644
--- a/e2e/router/router.go
+++ b/e2e/router/router.go
@@ -10,6 +10,7 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -22,6 +23,7 @@ import (
 	"github.com/slackhq/nebula/header"
 	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/udp"
+	"golang.org/x/exp/maps"
 )
 
 type R struct {
@@ -150,6 +152,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
 			case <-ctx.Done():
 				return
 			case <-clockSource.C:
+				r.renderHostmaps("clock tick")
 				r.renderFlow()
 			}
 		}
@@ -220,11 +223,16 @@ func (r *R) renderFlow() {
 		)
 	}
 
+	if len(participantsVals) > 2 {
+		// Get the first and last participantVals for notes
+		participantsVals = []string{participantsVals[0], participantsVals[len(participantsVals)-1]}
+	}
+
 	// Print packets
 	h := &header.H{}
 	for _, e := range r.flow {
 		if e.packet == nil {
-			fmt.Fprintf(f, "    note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
+			//fmt.Fprintf(f, "    note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
 			continue
 		}
 
@@ -294,6 +302,28 @@ func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
 	})
 }
 
+func (r *R) renderHostmaps(title string) {
+	c := maps.Values(r.controls)
+	sort.SliceStable(c, func(i, j int) bool {
+		return c[i].GetVpnIp() > c[j].GetVpnIp()
+	})
+
+	s := renderHostmaps(c...)
+	if len(r.additionalGraphs) > 0 {
+		lastGraph := r.additionalGraphs[len(r.additionalGraphs)-1]
+		if lastGraph.content == s {
+			// Ignore this rendering if it matches the last rendering added
+			// This is useful if you want to track rendering changes
+			return
+		}
+	}
+
+	r.additionalGraphs = append(r.additionalGraphs, mermaidGraph{
+		title:   title,
+		content: s,
+	})
+}
+
 // InjectFlow can be used to record packet flow if the test is handling the routing on its own.
 // The packet is assumed to have been received
 func (r *R) InjectFlow(from, to *nebula.Control, p *udp.Packet) {
@@ -332,6 +362,8 @@ func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool
 		return nil
 	}
 
+	r.renderHostmaps(fmt.Sprintf("Packet %v", len(r.flow)))
+
 	if len(r.ignoreFlows) > 0 {
 		var h header.H
 		err := h.Parse(p.Data)
diff --git a/go.mod b/go.mod
index 8e8a354..d05ab70 100644
--- a/go.mod
+++ b/go.mod
@@ -21,6 +21,7 @@ require (
 	github.com/stretchr/testify v1.8.1
 	github.com/vishvananda/netlink v1.1.0
 	golang.org/x/crypto v0.3.0
+	golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
 	golang.org/x/net v0.2.0
 	golang.org/x/sys v0.2.0
 	golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
diff --git a/go.sum b/go.sum
index 3c5eaa7..cb2db8e 100644
--- a/go.sum
+++ b/go.sum
@@ -266,6 +266,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
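One detail worth pulling out of the router.go change above: renderHostmaps now runs on every injected packet and every clock tick, so most renders would be identical to the previous one; comparing against the last stored graph and bailing out keeps the report readable. The same last-value dedupe can be sketched in isolation (illustrative names, not the patch's types):

package main

import "fmt"

// appendIfChanged records a new snapshot only when it differs from the most
// recent one, so a rapid stream of identical states collapses to one entry.
func appendIfChanged(history []string, snapshot string) []string {
	if n := len(history); n > 0 && history[n-1] == snapshot {
		return history // unchanged, skip the duplicate
	}
	return append(history, snapshot)
}

func main() {
	var h []string
	for _, s := range []string{"a", "a", "b", "b", "b", "c"} {
		h = appendIfChanged(h, s)
	}
	fmt.Println(h) // [a b c]
}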
diff --git a/handshake_ix.go b/handshake_ix.go
index 11a16a6..bb511cc 100644
--- a/handshake_ix.go
+++ b/handshake_ix.go
@@ -207,9 +207,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 	hostinfo.SetRemote(addr)
 	hostinfo.CreateRemoteCIDR(remoteCert)
 
-	// Only overwrite existing record if we should win the handshake race
-	overwrite := vpnIp > f.myVpnIp
-	existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f)
+	existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
 	if err != nil {
 		switch err {
 		case ErrAlreadySeen:
@@ -280,16 +278,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 				WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
 				Error("Failed to add HostInfo due to localIndex collision")
 			return
-		case ErrExistingHandshake:
-			// We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish
-			f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
-				WithField("certName", certName).
-				WithField("fingerprint", fingerprint).
-				WithField("issuer", issuer).
-				WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
-				WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
-				Error("Prevented a pending handshake race")
-			return
 		default:
 			// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
 			// And we forget to update it here
@@ -344,6 +332,12 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []b
 			Info("Handshake message sent")
 	}
 
+	if existing != nil {
+		// Make sure we are tracking the old primary if there was one, it needs to go away eventually
+		f.connectionManager.Out(existing.localIndexId)
+	}
+
+	f.connectionManager.Out(hostinfo.localIndexId)
 	hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
 
 	return
@@ -501,8 +495,12 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *
 	hostinfo.CreateRemoteCIDR(remoteCert)
 
 	// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
-	//TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok?
-	f.handshakeManager.Complete(hostinfo, f)
+	existing := f.handshakeManager.Complete(hostinfo, f)
+	if existing != nil {
+		// Make sure we are tracking the old primary if there was one, it needs to go away eventually
+		f.connectionManager.Out(existing.localIndexId)
+	}
+
 	hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
 	f.metricHandshakes.Update(duration)
 
diff --git a/handshake_manager.go b/handshake_manager.go
index 4325841..06805b6 100644
--- a/handshake_manager.go
+++ b/handshake_manager.go
@@ -53,6 +53,10 @@ type HandshakeManager struct {
 	metricTimedOut metrics.Counter
 	l              *logrus.Logger
 
+	// vpnIps is another map similar to the pending hostmap but tracks entries in the wheel instead
+	// this is to avoid situations where the same vpn ip enters the wheel and causes rapid fire handshaking
+	vpnIps map[iputil.VpnIp]struct{}
+
 	// can be used to trigger outbound handshake for the given vpnIp
 	trigger chan iputil.VpnIp
 }
@@ -66,6 +70,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
 		config:                 config,
 		trigger:                make(chan iputil.VpnIp, config.triggerBuffer),
 		OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
+		vpnIps:                 map[iputil.VpnIp]struct{}{},
 		messageMetrics:         config.messageMetrics,
 		metricInitiated:        metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
 		metricTimedOut:         metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
@@ -103,6 +108,7 @@ func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.E
 func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) {
 	hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
 	if err != nil {
+		delete(c.vpnIps, vpnIp)
 		return
 	}
 	hostinfo.Lock()
@@ -160,7 +166,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
 		c.lightHouse.QueryServer(vpnIp, f)
 	}
 
-	// Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
+	// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
 	var sentTo []*udp.Addr
 	hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
 		c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
@@ -260,7 +266,6 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, l
 
 	// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
 	if !lighthouseTriggered {
-		//TODO: feel like we dupe handshake real fast in a tight loop, why?
 		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
 	}
 }
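The new vpnIps set introduced above mirrors what is already sitting in the timer wheel: AddVpnIp (next hunk) only schedules a wheel entry the first time a vpn IP shows up, and handleOutbound drops the marker once the pending entry disappears, so a host that is re-added while still queued cannot trigger rapid-fire handshakes. A small sketch of that guard, with illustrative types rather than Nebula's real iputil.VpnIp:

package main

import "fmt"

// scheduler dedupes scheduling requests: a key can only be queued once
// until consume() releases it, no matter how many times enqueue() is called.
type scheduler struct {
	queued map[string]struct{}
	wheel  []string // stand-in for the timer wheel
}

func (s *scheduler) enqueue(key string) {
	if _, ok := s.queued[key]; !ok {
		s.wheel = append(s.wheel, key) // only the first request enters the wheel
	}
	s.queued[key] = struct{}{}
}

func (s *scheduler) consume(key string) {
	delete(s.queued, key) // the key may be scheduled again now
}

func main() {
	s := &scheduler{queued: map[string]struct{}{}}
	s.enqueue("10.0.0.2")
	s.enqueue("10.0.0.2")     // ignored, already in the wheel
	fmt.Println(len(s.wheel)) // 1
}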
@@ -269,7 +274,10 @@ func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *H
 	hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
 
 	if created {
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
+		if _, ok := c.vpnIps[vpnIp]; !ok {
+			c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
+		}
+		c.vpnIps[vpnIp] = struct{}{}
 		c.metricInitiated.Inc(1)
 	}
 
@@ -280,7 +288,6 @@ var (
 	ErrExistingHostInfo    = errors.New("existing hostinfo")
 	ErrAlreadySeen         = errors.New("already seen")
 	ErrLocalIndexCollision = errors.New("local index collision")
-	ErrExistingHandshake   = errors.New("existing handshake")
 )
 
 // CheckAndComplete checks for any conflicts in the main and pending hostmap
@@ -294,7 +301,7 @@ var (
 //
 // ErrLocalIndexCollision if we already have an entry in the main or pending
 // hostmap for the hostinfo.localIndexId.
-func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) {
+func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
 	c.pendingHostMap.Lock()
 	defer c.pendingHostMap.Unlock()
 	c.mainHostMap.Lock()
@@ -303,9 +310,14 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 	// Check if we already have a tunnel with this vpn ip
 	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
 	if found && existingHostInfo != nil {
-		// Is it just a delayed handshake packet?
-		if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
-			return existingHostInfo, ErrAlreadySeen
+		testHostInfo := existingHostInfo
+		for testHostInfo != nil {
+			// Is it just a delayed handshake packet?
+			if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], testHostInfo.HandshakePacket[handshakePacket]) {
+				return testHostInfo, ErrAlreadySeen
+			}
+
+			testHostInfo = testHostInfo.next
 		}
 
 		// Is this a newer handshake?
@@ -337,56 +349,19 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 			Info("New host shadows existing host remoteIndex")
 	}
 
-	// Check if we are also handshaking with this vpn ip
-	pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
-	if found && pendingHostInfo != nil {
-		if !overwrite {
-			// We won, let our pending handshake win
-			return pendingHostInfo, ErrExistingHandshake
-		}
-
-		// We lost, take this handshake and move any cached packets over so they get sent
-		pendingHostInfo.ConnectionState.queueLock.Lock()
-		hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
-		c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
-		pendingHostInfo.ConnectionState.queueLock.Unlock()
-		pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
-	}
-
-	if existingHostInfo != nil {
-		// We are going to overwrite this entry, so remove the old references
-		delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
-		delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
-		delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
-		for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
-			delete(c.mainHostMap.Relays, relayIdx)
-		}
-	}
-
-	c.mainHostMap.addHostInfo(hostinfo, f)
+	c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
 	return existingHostInfo, nil
 }
 
 // Complete is a simpler version of CheckAndComplete when we already know we
 // won't have a localIndexId collision because we already have an entry in the
-// pendingHostMap
-func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
+// pendingHostMap. An existing hostinfo is returned if there was one.
+func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) *HostInfo {
 	c.pendingHostMap.Lock()
 	defer c.pendingHostMap.Unlock()
 	c.mainHostMap.Lock()
 	defer c.mainHostMap.Unlock()
 
-	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
-	if found && existingHostInfo != nil {
-		// We are going to overwrite this entry, so remove the old references
-		delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
-		delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
-		delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
-		for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
-			delete(c.mainHostMap.Relays, relayIdx)
-		}
-	}
-
 	existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
 	if found && existingRemoteIndex != nil {
 		// We have a collision, but this can happen since we can't control
@@ -396,8 +371,10 @@ func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
 			Info("New host shadows existing host remoteIndex")
 	}
 
-	c.mainHostMap.addHostInfo(hostinfo, f)
+	existingHostInfo := c.mainHostMap.Hosts[hostinfo.vpnIp]
+	c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
 	c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
+	return existingHostInfo
 }
 
 // AddIndexHostInfo generates a unique localIndexId for this HostInfo
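With ErrExistingHandshake gone, a crossed handshake no longer kills one side: the new tunnel simply becomes primary and the displaced hostinfo is returned, so ixHandshakeStage1/Stage2 can hand it to the connection manager to be aged out. The shape of that contract, reduced to a sketch with hypothetical names rather than the real HostInfo/HostMap types:

package main

import "fmt"

type tunnel struct{ id uint32 }

// replacePrimary installs t as the new primary for key and returns whatever
// it displaced; the caller is responsible for retiring the returned tunnel.
func replacePrimary(m map[string]*tunnel, key string, t *tunnel) *tunnel {
	old := m[key] // may be nil if this is the first tunnel for key
	m[key] = t
	return old
}

func main() {
	hosts := map[string]*tunnel{}
	if old := replacePrimary(hosts, "10.0.0.2", &tunnel{id: 1}); old == nil {
		fmt.Println("first tunnel, nothing to retire")
	}
	if old := replacePrimary(hosts, "10.0.0.2", &tunnel{id: 2}); old != nil {
		fmt.Printf("schedule teardown for displaced tunnel %d\n", old.id)
	}
}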
diff --git a/hostmap.go b/hostmap.go
index 372333e..231beb1 100644
--- a/hostmap.go
+++ b/hostmap.go
@@ -23,6 +23,10 @@ const PromoteEvery = 1000
 const ReQueryEvery = 5000
 const MaxRemotes = 10
 
+// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
+// 5 allows for an initial handshake and each host pair re-handshaking twice
+const MaxHostInfosPerVpnIp = 5
+
 // How long we should prevent roaming back to the previous IP.
 // This helps prevent flapping due to packets already in flight
 const RoamingSuppressSeconds = 2
@@ -180,6 +184,10 @@ type HostInfo struct {
 	lastRoam       time.Time
 	lastRoamRemote *udp.Addr
+
+	// Used to track other hostinfos for this vpn ip since only 1 can be primary
+	// Synchronised via hostmap lock and not the hostinfo lock.
+	next, prev *HostInfo
 }
 
 type ViaSender struct {
@@ -395,9 +403,12 @@ func (hm *HostMap) DeleteReverseIndex(index uint32) {
 	}
 }
 
-func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) {
+// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
+func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
 	// Delete the host itself, ensuring it's not modified anymore
 	hm.Lock()
+	// If we have a previous or next hostinfo then we are not the last one for this vpn ip
+	final := (hostinfo.next == nil && hostinfo.prev == nil)
 	hm.unlockedDeleteHostInfo(hostinfo)
 	hm.Unlock()
 
@@ -421,6 +432,8 @@ func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) {
 	for _, localIdx := range teardownRelayIdx {
 		hm.RemoveRelay(localIdx)
 	}
+
+	return final
 }
 
 func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
@@ -429,29 +442,81 @@ func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
 	delete(hm.RemoteIndexes, localIdx)
 }
 
-func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
-	// Check if this same hostId is in the hostmap with a different instance.
-	// This could happen if we have an entry in the pending hostmap with different
-	// index values than the one in the main hostmap.
-	hostinfo2, ok := hm.Hosts[hostinfo.vpnIp]
-	if ok && hostinfo2 != hostinfo {
-		delete(hm.Hosts, hostinfo2.vpnIp)
-		delete(hm.Indexes, hostinfo2.localIndexId)
-		delete(hm.RemoteIndexes, hostinfo2.remoteIndexId)
+func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
+	hm.Lock()
+	defer hm.Unlock()
+	hm.unlockedMakePrimary(hostinfo)
+}
+
+func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
+	oldHostinfo := hm.Hosts[hostinfo.vpnIp]
+	if oldHostinfo == hostinfo {
+		return
 	}
 
-	delete(hm.Hosts, hostinfo.vpnIp)
-	if len(hm.Hosts) == 0 {
-		hm.Hosts = map[iputil.VpnIp]*HostInfo{}
+	if hostinfo.prev != nil {
+		hostinfo.prev.next = hostinfo.next
 	}
+
+	if hostinfo.next != nil {
+		hostinfo.next.prev = hostinfo.prev
+	}
+
+	hm.Hosts[hostinfo.vpnIp] = hostinfo
+
+	if oldHostinfo == nil {
+		return
+	}
+
+	hostinfo.next = oldHostinfo
+	oldHostinfo.prev = hostinfo
+	hostinfo.prev = nil
+}
+
+func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
+	primary, ok := hm.Hosts[hostinfo.vpnIp]
+	if ok && primary == hostinfo {
+		// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
+		delete(hm.Hosts, hostinfo.vpnIp)
+		if len(hm.Hosts) == 0 {
+			hm.Hosts = map[iputil.VpnIp]*HostInfo{}
+		}
+
+		if hostinfo.next != nil {
+			// We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
+			hm.Hosts[hostinfo.vpnIp] = hostinfo.next
+			// It is primary, there is no previous hostinfo now
+			hostinfo.next.prev = nil
+		}
+
+	} else {
+		// Relink if we were in the middle of multiple hostinfos for this vpn ip
+		if hostinfo.prev != nil {
+			hostinfo.prev.next = hostinfo.next
+		}
+
+		if hostinfo.next != nil {
+			hostinfo.next.prev = hostinfo.prev
+		}
+	}
+
+	hostinfo.next = nil
+	hostinfo.prev = nil
+
+	// The remote index uses index ids outside our control so lets make sure we are only removing
+	// the remote index pointer here if it points to the hostinfo we are deleting
+	hostinfo2, ok := hm.RemoteIndexes[hostinfo.remoteIndexId]
+	if ok && hostinfo2 == hostinfo {
+		delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
+		if len(hm.RemoteIndexes) == 0 {
+			hm.RemoteIndexes = map[uint32]*HostInfo{}
+		}
+	}
+
 	delete(hm.Indexes, hostinfo.localIndexId)
 	if len(hm.Indexes) == 0 {
 		hm.Indexes = map[uint32]*HostInfo{}
 	}
-	delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
-	if len(hm.RemoteIndexes) == 0 {
-		hm.RemoteIndexes = map[uint32]*HostInfo{}
-	}
 
 	if hm.l.Level >= logrus.DebugLevel {
 		hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
@@ -520,15 +585,22 @@ func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*Host
 	return nil, errors.New("unable to find host")
 }
 
-// We already have the hm Lock when this is called, so make sure to not call
-// any other methods that might try to grab it again
-func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) {
+// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
+// If an entry exists for the Hosts table (vpnIp -> hostinfo) then the provided hostinfo will be made primary
+func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
 	if f.serveDns {
 		remoteCert := hostinfo.ConnectionState.peerCert
 		dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
 	}
 
+	existing := hm.Hosts[hostinfo.vpnIp]
 	hm.Hosts[hostinfo.vpnIp] = hostinfo
+
+	if existing != nil {
+		hostinfo.next = existing
+		existing.prev = hostinfo
+	}
+
 	hm.Indexes[hostinfo.localIndexId] = hostinfo
 	hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
 
@@ -537,6 +609,16 @@ func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) {
 			"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
 			Debug("Hostmap vpnIp added")
 	}
+
+	i := 1
+	check := hostinfo
+	for check != nil {
+		if i > MaxHostInfosPerVpnIp {
+			hm.unlockedDeleteHostInfo(check)
+		}
+		check = check.next
+		i++
+	}
 }
 
 // punchList assembles a list of all non nil RemoteList pointer entries in this hostmap
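The hostmap now keeps every live hostinfo for a vpn IP on an intrusive doubly-linked list whose head is the entry in hm.Hosts: unlockedAddHostInfo prepends and makes the newcomer primary, unlockedMakePrimary splices a node out and reattaches it at the head, and deletion promotes next when the head goes away. A reduced, self-contained model of those pointer moves (a hypothetical node type, not nebula's HostInfo):

package main

import "fmt"

type node struct {
	id         int
	next, prev *node
}

// addPrimary prepends n, making it the head (primary) entry for key.
func addPrimary(heads map[string]*node, key string, n *node) {
	if old := heads[key]; old != nil {
		n.next = old
		old.prev = n
	}
	heads[key] = n
}

// makePrimary splices n out of its current position and reattaches it at the head.
func makePrimary(heads map[string]*node, key string, n *node) {
	old := heads[key]
	if old == n {
		return // already primary
	}
	if n.prev != nil {
		n.prev.next = n.next
	}
	if n.next != nil {
		n.next.prev = n.prev
	}
	heads[key] = n
	n.prev = nil
	if old == nil {
		n.next = nil
		return
	}
	n.next = old
	old.prev = n
}

// remove unlinks n; if n was primary its successor is promoted, mirroring
// how unlockedDeleteHostInfo keeps hm.Hosts pointed at a live entry.
func remove(heads map[string]*node, key string, n *node) {
	if heads[key] == n {
		delete(heads, key)
		if n.next != nil {
			heads[key] = n.next
			n.next.prev = nil
		}
	} else {
		if n.prev != nil {
			n.prev.next = n.next
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
	}
	n.next, n.prev = nil, nil
}

func main() {
	heads := map[string]*node{}
	a, b := &node{id: 1}, &node{id: 2}
	addPrimary(heads, "10.0.0.2", a)  // list: a
	addPrimary(heads, "10.0.0.2", b)  // list: b -> a
	makePrimary(heads, "10.0.0.2", a) // list: a -> b
	remove(heads, "10.0.0.2", a)      // b promoted back to primary
	fmt.Println(heads["10.0.0.2"].id) // 2
}

The tests that follow exercise exactly these promotions and unlinks against the real hostmap.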
diff --git a/hostmap_test.go b/hostmap_test.go
index 2808317..e523a21 100644
--- a/hostmap_test.go
+++ b/hostmap_test.go
@@ -1 +1,207 @@
 package nebula
+
+import (
+	"net"
+	"testing"
+
+	"github.com/slackhq/nebula/test"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestHostMap_MakePrimary(t *testing.T) {
+	l := test.NewLogger()
+	hm := NewHostMap(
+		l, "test",
+		&net.IPNet{
+			IP:   net.IP{10, 0, 0, 1},
+			Mask: net.IPMask{255, 255, 255, 0},
+		},
+		[]*net.IPNet{},
+	)
+
+	f := &Interface{}
+
+	h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
+	h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
+	h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
+	h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
+
+	hm.unlockedAddHostInfo(h4, f)
+	hm.unlockedAddHostInfo(h3, f)
+	hm.unlockedAddHostInfo(h2, f)
+	hm.unlockedAddHostInfo(h1, f)
+
+	// Make sure we go h1 -> h2 -> h3 -> h4
+	prim, _ := hm.QueryVpnIp(1)
+	assert.Equal(t, h1.localIndexId, prim.localIndexId)
+	assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
+	assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
+	assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
+	assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
+	assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
+	assert.Nil(t, h4.next)
+
+	// Swap h3/middle to primary
+	hm.MakePrimary(h3)
+
+	// Make sure we go h3 -> h1 -> h2 -> h4
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h3.localIndexId, prim.localIndexId)
+	assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
+	assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
+	assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
+	assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
+	assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
+	assert.Nil(t, h4.next)
+
+	// Swap h4/tail to primary
+	hm.MakePrimary(h4)
+
+	// Make sure we go h4 -> h3 -> h1 -> h2
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h4.localIndexId, prim.localIndexId)
+	assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
+	assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
+	assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
+	assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
+	assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
+	assert.Nil(t, h2.next)
+
+	// Swap h4 again should be no-op
+	hm.MakePrimary(h4)
+
+	// Make sure we go h4 -> h3 -> h1 -> h2
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h4.localIndexId, prim.localIndexId)
+	assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h1.localIndexId, h3.next.localIndexId)
+	assert.Equal(t, h4.localIndexId, h3.prev.localIndexId)
+	assert.Equal(t, h2.localIndexId, h1.next.localIndexId)
+	assert.Equal(t, h3.localIndexId, h1.prev.localIndexId)
+	assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
+	assert.Nil(t, h2.next)
+}
+
+func TestHostMap_DeleteHostInfo(t *testing.T) {
+	l := test.NewLogger()
+	hm := NewHostMap(
+		l, "test",
+		&net.IPNet{
+			IP:   net.IP{10, 0, 0, 1},
+			Mask: net.IPMask{255, 255, 255, 0},
+		},
+		[]*net.IPNet{},
+	)
+
+	f := &Interface{}
+
+	h1 := &HostInfo{vpnIp: 1, localIndexId: 1}
+	h2 := &HostInfo{vpnIp: 1, localIndexId: 2}
+	h3 := &HostInfo{vpnIp: 1, localIndexId: 3}
+	h4 := &HostInfo{vpnIp: 1, localIndexId: 4}
+	h5 := &HostInfo{vpnIp: 1, localIndexId: 5}
+	h6 := &HostInfo{vpnIp: 1, localIndexId: 6}
+
+	hm.unlockedAddHostInfo(h6, f)
+	hm.unlockedAddHostInfo(h5, f)
+	hm.unlockedAddHostInfo(h4, f)
+	hm.unlockedAddHostInfo(h3, f)
+	hm.unlockedAddHostInfo(h2, f)
+	hm.unlockedAddHostInfo(h1, f)
+
+	// h6 should be deleted
+	assert.Nil(t, h6.next)
+	assert.Nil(t, h6.prev)
+	_, err := hm.QueryIndex(h6.localIndexId)
+	assert.Error(t, err)
+
+	// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
+	prim, _ := hm.QueryVpnIp(1)
+	assert.Equal(t, h1.localIndexId, prim.localIndexId)
+	assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h1.localIndexId, h2.prev.localIndexId)
+	assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
+	assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
+	assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
+	assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
+	assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
+	assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
+	assert.Nil(t, h5.next)
+
+	// Delete primary
+	hm.DeleteHostInfo(h1)
+	assert.Nil(t, h1.prev)
+	assert.Nil(t, h1.next)
+
+	// Make sure we go h2 -> h3 -> h4 -> h5
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h2.localIndexId, prim.localIndexId)
+	assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h3.localIndexId, h2.next.localIndexId)
+	assert.Equal(t, h2.localIndexId, h3.prev.localIndexId)
+	assert.Equal(t, h4.localIndexId, h3.next.localIndexId)
+	assert.Equal(t, h3.localIndexId, h4.prev.localIndexId)
+	assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
+	assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
+	assert.Nil(t, h5.next)
+
+	// Delete in the middle
+	hm.DeleteHostInfo(h3)
+	assert.Nil(t, h3.prev)
+	assert.Nil(t, h3.next)
+
+	// Make sure we go h2 -> h4 -> h5
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h2.localIndexId, prim.localIndexId)
+	assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
+	assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
+	assert.Equal(t, h5.localIndexId, h4.next.localIndexId)
+	assert.Equal(t, h4.localIndexId, h5.prev.localIndexId)
+	assert.Nil(t, h5.next)
+
+	// Delete the tail
+	hm.DeleteHostInfo(h5)
+	assert.Nil(t, h5.prev)
+	assert.Nil(t, h5.next)
+
+	// Make sure we go h2 -> h4
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h2.localIndexId, prim.localIndexId)
+	assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Equal(t, h4.localIndexId, h2.next.localIndexId)
+	assert.Equal(t, h2.localIndexId, h4.prev.localIndexId)
+	assert.Nil(t, h4.next)
+
+	// Delete the head
+	hm.DeleteHostInfo(h2)
+	assert.Nil(t, h2.prev)
+	assert.Nil(t, h2.next)
+
+	// Make sure we only have h4
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Equal(t, h4.localIndexId, prim.localIndexId)
+	assert.Nil(t, prim.prev)
+	assert.Nil(t, prim.next)
+	assert.Nil(t, h4.next)
+
+	// Delete the only item
+	hm.DeleteHostInfo(h4)
+	assert.Nil(t, h4.prev)
+	assert.Nil(t, h4.next)
+
+	// Make sure we have nil
+	prim, _ = hm.QueryVpnIp(1)
+	assert.Nil(t, prim)
+}
diff --git a/outside.go b/outside.go
index c43a385..605325d 100644
--- a/outside.go
+++ b/outside.go
@@ -245,9 +245,11 @@ func (f *Interface) closeTunnel(hostInfo *HostInfo) {
 	//TODO: this would be better as a single function in ConnectionManager that handled locks appropriately
 	f.connectionManager.ClearLocalIndex(hostInfo.localIndexId)
 	f.connectionManager.ClearPendingDeletion(hostInfo.localIndexId)
-	f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
-
-	f.hostMap.DeleteHostInfo(hostInfo)
+	final := f.hostMap.DeleteHostInfo(hostInfo)
+	if final {
+		// We no longer have any tunnels with this vpn ip, clear learned lighthouse state to lower memory usage
+		f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
+	}
 }
 
 // sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
diff --git a/overlay/tun_tester.go b/overlay/tun_tester.go
index a4ee20b..442a9b5 100644
--- a/overlay/tun_tester.go
+++ b/overlay/tun_tester.go
@@ -51,7 +51,7 @@ func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int
 // packets should exit the udp side, capture them with udpConn.Get
 func (t *TestTun) Send(packet []byte) {
 	if t.l.Level >= logrus.InfoLevel {
-		t.l.WithField("dataLen", len(packet)).Info("Tun receiving injected packet")
+		t.l.WithField("dataLen", len(packet)).Debug("Tun receiving injected packet")
 	}
 	t.rxPackets <- packet
 }
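The outside.go change above leans on DeleteHostInfo's new bool result: closeTunnel can now tell whether it dropped the last hostinfo for a vpn IP, and learned lighthouse state is only cleared on that final teardown, since an earlier rehandshake may still be using it. The pattern, sketched generically with hypothetical names:

package main

import "fmt"

type registry struct {
	tunnels map[string]int    // open-tunnel count per vpn ip
	learned map[string]string // auxiliary state worth freeing eventually
}

// closeOne drops one tunnel and only clears the auxiliary state once the
// final tunnel for that key is gone, mirroring DeleteHostInfo's bool result.
func (r *registry) closeOne(key string) {
	r.tunnels[key]--
	final := r.tunnels[key] <= 0
	if final {
		delete(r.tunnels, key)
		delete(r.learned, key) // safe: nothing references it anymore
	}
}

func main() {
	r := &registry{
		tunnels: map[string]int{"10.0.0.2": 2},
		learned: map[string]string{"10.0.0.2": "udp addrs"},
	}
	r.closeOne("10.0.0.2")
	fmt.Println(r.learned["10.0.0.2"]) // still "udp addrs"
	r.closeOne("10.0.0.2")
	fmt.Println(len(r.learned)) // 0
}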
diff --git a/relay_manager.go b/relay_manager.go
index 95807bd..080d144 100644
--- a/relay_manager.go
+++ b/relay_manager.go
@@ -61,6 +61,11 @@ func AddRelay(l *logrus.Logger, relayHostInfo *HostInfo, hm *HostMap, vpnIp iput
 
 	_, inRelays := hm.Relays[index]
 	if !inRelays {
+		// Avoid standing up a relay that can't be used since only the primary hostinfo
+		// will be pointed to by the relay logic
+		//TODO: if there was an existing primary and it had relay state, should we merge?
+		hm.unlockedMakePrimary(relayHostInfo)
+
 		hm.Relays[index] = relayHostInfo
 		newRelay := Relay{
 			Type: relayType,
diff --git a/ssh.go b/ssh.go
index f8050ff..7b9e28a 100644
--- a/ssh.go
+++ b/ssh.go
@@ -22,8 +22,9 @@ import (
 )
 
 type sshListHostMapFlags struct {
-	Json   bool
-	Pretty bool
+	Json    bool
+	Pretty  bool
+	ByIndex bool
 }
 
 type sshPrintCertFlags struct {
@@ -174,6 +175,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
 			s := sshListHostMapFlags{}
 			fl.BoolVar(&s.Json, "json", false, "outputs as json with more information")
 			fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json")
+			fl.BoolVar(&s.ByIndex, "by-index", false, "gets all hosts in the hostmap from the index table")
 			return fl, &s
 		},
 		Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
@@ -189,6 +191,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
 			s := sshListHostMapFlags{}
 			fl.BoolVar(&s.Json, "json", false, "outputs as json with more information")
 			fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json")
+			fl.BoolVar(&s.ByIndex, "by-index", false, "gets all hosts in the hostmap from the index table")
 			return fl, &s
 		},
 		Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
@@ -368,7 +371,13 @@ func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error
 		return nil
 	}
 
-	hm := listHostMap(hostMap)
+	var hm []ControlHostInfo
+	if fs.ByIndex {
+		hm = listHostMapIndexes(hostMap)
+	} else {
+		hm = listHostMapHosts(hostMap)
+	}
+
 	sort.Slice(hm, func(i, j int) bool {
 		return bytes.Compare(hm[i].VpnIp, hm[j].VpnIp) < 0
 	})
diff --git a/udp/udp_tester.go b/udp/udp_tester.go
index 55213b8..b3e2498 100644
--- a/udp/udp_tester.go
+++ b/udp/udp_tester.go
@@ -66,7 +66,7 @@ func (u *Conn) Send(packet *Packet) {
 		u.l.WithField("header", h).
 			WithField("udpAddr", fmt.Sprintf("%v:%v", packet.FromIp, packet.FromPort)).
 			WithField("dataLen", len(packet.Data)).
-			Info("UDP receiving injected packet")
+			Debug("UDP receiving injected packet")
 	}
 	u.RxPackets <- packet
 }

From f0ac61c1f0c16e95d27a3c23385f065eebbdd1e3 Mon Sep 17 00:00:00 2001
From: Caleb Jasik
Date: Mon, 13 Mar 2023 13:16:46 -0500
Subject: [PATCH 26/26] Add `nebula.plist` based on the homebrew nebula LaunchDaemon plist (#762)

---
 examples/service_scripts/nebula.plist | 34 +++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 examples/service_scripts/nebula.plist

diff --git a/examples/service_scripts/nebula.plist b/examples/service_scripts/nebula.plist
new file mode 100644
index 0000000..c423cfc
--- /dev/null
+++ b/examples/service_scripts/nebula.plist
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>KeepAlive</key>
+	<true/>
+	<key>Label</key>
+	<string>net.defined.nebula</string>
+	<key>WorkingDirectory</key>
+	<string>/Users/{username}/.local/bin/nebula</string>
+	<key>LimitLoadToSessionType</key>
+	<array>
+		<string>Aqua</string>
+		<string>Background</string>
+		<string>LoginWindow</string>
+		<string>StandardIO</string>
+		<string>System</string>
+	</array>
+	<key>ProgramArguments</key>
+	<array>
+		<string>./nebula</string>
+		<string>-config</string>
+		<string>./config.yml</string>
+	</array>
+	<key>RunAtLoad</key>
+	<true/>
+	<key>StandardErrorPath</key>
+	<string>./nebula.log</string>
+	<key>StandardOutPath</key>
+	<string>./nebula.log</string>
+	<key>UserName</key>
+	<string>root</string>
+</dict>
+</plist>
\ No newline at end of file
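A usage note on the example above, for anyone adapting it: the XML structure here is reconstructed from the stripped plist keys and standard LaunchDaemon conventions, so verify it against the shipped file before relying on it. LaunchDaemon plists like this are typically copied to /Library/LaunchDaemons and activated with `sudo launchctl load /Library/LaunchDaemons/nebula.plist` (or `launchctl bootstrap system ...` on newer macOS), and the {username} placeholder in WorkingDirectory must be replaced with a real path first.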