Lighthouse reload support (#649)

Authored by Nate Brown on 2022-03-14 12:35:13 -05:00; committed by GitHub
Co-authored-by: John Maguire <contact@johnmaguire.me>
parent bbe0a032bb, commit 312a01dc09
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
13 changed files with 471 additions and 222 deletions


@@ -11,6 +11,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
@@ -26,6 +27,7 @@ type C struct {
 	oldSettings map[interface{}]interface{}
 	callbacks   []func(*C)
 	l           *logrus.Logger
+	reloadLock  sync.Mutex
 }

 func NewC(l *logrus.Logger) *C {
@@ -133,6 +135,9 @@ func (c *C) CatchHUP(ctx context.Context) {
 }

 func (c *C) ReloadConfig() {
+	c.reloadLock.Lock()
+	defer c.reloadLock.Unlock()
+
 	c.oldSettings = make(map[interface{}]interface{})
 	for k, v := range c.Settings {
 		c.oldSettings[k] = v
@@ -149,6 +154,27 @@ func (c *C) ReloadConfig() {
 	}
 }

+func (c *C) ReloadConfigString(raw string) error {
+	c.reloadLock.Lock()
+	defer c.reloadLock.Unlock()
+
+	c.oldSettings = make(map[interface{}]interface{})
+	for k, v := range c.Settings {
+		c.oldSettings[k] = v
+	}
+
+	err := c.LoadString(raw)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range c.callbacks {
+		v(c)
+	}
+
+	return nil
+}
+
 // GetString will get the string for k or return the default d if not found or invalid
 func (c *C) GetString(k, d string) string {
 	r := c.Get(k)
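
The new ReloadConfigString runs the same snapshot-then-notify sequence as the SIGHUP-driven ReloadConfig, but takes the raw yaml as a string, and both paths now serialize on reloadLock so concurrent reloads cannot interleave. A minimal sketch of driving it directly, using only calls visible in this commit (the yaml keys are just the punchy ones migrated further down):

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	l := logrus.New()
	c := config.NewC(l)

	// Initial settings, parsed from a string the same way a config file would be.
	if err := c.LoadString("punchy:\n  delay: 1m\n"); err != nil {
		panic(err)
	}

	// Reload callbacks fire after every successful reload; Punchy and
	// LightHouse register themselves this way in this commit.
	c.RegisterReloadCallback(func(c *config.C) {
		fmt.Println("punchy.delay changed:", c.HasChanged("punchy.delay"))
	})

	// Swap in new settings and run the callbacks under reloadLock.
	if err := c.ReloadConfigString("punchy:\n  delay: 10m\n"); err != nil {
		panic(err)
	}
}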


@@ -35,7 +35,7 @@ func Test_NewConnectionManagerTest(t *testing.T) {
 		rawCertificateNoKey: []byte{},
 	}

-	lh := NewLightHouse(l, false, &net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{}, 1000, 0, &udp.Conn{}, false, 1, false)
+	lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
 	ifce := &Interface{
 		hostMap: hostMap,
 		inside:  &test.NoopTun{},
@@ -104,7 +104,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
 		rawCertificateNoKey: []byte{},
 	}

-	lh := NewLightHouse(l, false, &net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{}, 1000, 0, &udp.Conn{}, false, 1, false)
+	lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
 	ifce := &Interface{
 		hostMap: hostMap,
 		inside:  &test.NoopTun{},
@@ -213,7 +213,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
 		rawCertificateNoKey: []byte{},
 	}

-	lh := NewLightHouse(l, false, &net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{}, 1000, 0, &udp.Conn{}, false, 1, false)
+	lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
 	ifce := &Interface{
 		hostMap: hostMap,
 		inside:  &test.NoopTun{},


@@ -160,9 +160,10 @@ func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
 func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
 	//TODO: this is probably better as a function in ConnectionManager or HostMap directly
 	c.f.hostMap.Lock()
+	lighthouses := c.f.lightHouse.GetLighthouses()
 	for _, h := range c.f.hostMap.Hosts {
 		if excludeLighthouses {
-			if _, ok := c.f.lightHouse.lighthouses[h.vpnIp]; ok {
+			if _, ok := lighthouses[h.vpnIp]; ok {
 				continue
 			}
 		}


@@ -7,7 +7,7 @@ import (
 func HandleIncomingHandshake(f *Interface, addr *udp.Addr, packet []byte, h *header.H, hostinfo *HostInfo) {
 	// First remote allow list check before we know the vpnIp
-	if !f.lightHouse.remoteAllowList.AllowUnknownVpnIp(addr.IP) {
+	if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
 		f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
 		return
 	}


@@ -114,7 +114,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, packet []byte, h *header.H)
 		return
 	}

-	if !f.lightHouse.remoteAllowList.Allow(vpnIp, addr.IP) {
+	if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) {
 		f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
 		return
 	}
@@ -321,7 +321,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, hostinfo *HostInfo, packet
 	hostinfo.Lock()
 	defer hostinfo.Unlock()

-	if !f.lightHouse.remoteAllowList.Allow(hostinfo.vpnIp, addr.IP) {
+	if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
 		f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
 		return false
 	}


@@ -21,8 +21,12 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
 	preferredRanges := []*net.IPNet{localrange}
 	mw := &mockEncWriter{}
 	mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
+	lh := &LightHouse{
+		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
+		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
+	}

-	blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, &LightHouse{}, &udp.Conn{}, defaultHandshakeConfig)
+	blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)

 	now := time.Now()
 	blah.NextOutboundHandshakeTimerTick(now, mw)
@@ -74,7 +78,12 @@ func Test_NewHandshakeManagerTrigger(t *testing.T) {
 	preferredRanges := []*net.IPNet{localrange}
 	mw := &mockEncWriter{}
 	mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
-	lh := &LightHouse{addrMap: make(map[iputil.VpnIp]*RemoteList), l: l}
+	lh := &LightHouse{
+		addrMap:           make(map[iputil.VpnIp]*RemoteList),
+		l:                 l,
+		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
+		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
+	}
 	blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)


@@ -110,7 +110,7 @@ func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
 		// If this is a static host, we don't need to wait for the HostQueryReply
 		// We can trigger the handshake right now
-		if _, ok := f.lightHouse.staticList[vpnIp]; ok {
+		if _, ok := f.lightHouse.GetStaticHostList()[vpnIp]; ok {
 			select {
 			case f.handshakeManager.trigger <- vpnIp:
 			default:


@@ -7,14 +7,18 @@ import (
 	"fmt"
 	"net"
 	"sync"
+	"sync/atomic"
 	"time"
+	"unsafe"

 	"github.com/golang/protobuf/proto"
 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
 	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/udp"
+	"github.com/slackhq/nebula/util"
 )

 //TODO: if a lighthouse doesn't have an answer, clients AGGRESSIVELY REQUERY.. why? handshake manager and/or getOrHandshake?
@@ -28,7 +32,9 @@ type LightHouse struct {
 	amLighthouse bool
 	myVpnIp      iputil.VpnIp
 	myVpnZeros   iputil.VpnIp
+	myVpnNet     *net.IPNet
 	punchConn    *udp.Conn
+	punchy       *Punchy

 	// Local cache of answers from light houses
 	// map of vpn Ip to answers
@@ -39,80 +45,240 @@ type LightHouse struct {
 	// respond with.
 	// - When we are not a lighthouse, this filters which addresses we accept
 	// from lighthouses.
-	remoteAllowList *RemoteAllowList
+	atomicRemoteAllowList *RemoteAllowList

 	// filters local addresses that we advertise to lighthouses
-	localAllowList *LocalAllowList
+	atomicLocalAllowList *LocalAllowList

 	// used to trigger the HandshakeManager when we receive HostQueryReply
 	handshakeTrigger chan<- iputil.VpnIp

-	// staticList exists to avoid having a bool in each addrMap entry
+	// atomicStaticList exists to avoid having a bool in each addrMap entry
 	// since static should be rare
-	staticList  map[iputil.VpnIp]struct{}
-	lighthouses map[iputil.VpnIp]struct{}
-	interval    int
-	nebulaPort  uint32 // 32 bits because protobuf does not have a uint16
-	punchBack   bool
-	punchDelay  time.Duration
+	atomicStaticList  map[iputil.VpnIp]struct{}
+	atomicLighthouses map[iputil.VpnIp]struct{}
+
+	atomicInterval  int64
+	updateCancel    context.CancelFunc
+	updateParentCtx context.Context
+	updateUdp       udp.EncWriter
+	nebulaPort      uint32 // 32 bits because protobuf does not have a uint16

 	metrics           *MessageMetrics
 	metricHolepunchTx metrics.Counter
 	l                 *logrus.Logger
 }

-func NewLightHouse(l *logrus.Logger, amLighthouse bool, myVpnIpNet *net.IPNet, ips []iputil.VpnIp, interval int, nebulaPort uint32, pc *udp.Conn, punchBack bool, punchDelay time.Duration, metricsEnabled bool) *LightHouse {
-	ones, _ := myVpnIpNet.Mask.Size()
-	h := LightHouse{
-		amLighthouse: amLighthouse,
-		myVpnIp:      iputil.Ip2VpnIp(myVpnIpNet.IP),
-		myVpnZeros:   iputil.VpnIp(32 - ones),
-		addrMap:      make(map[iputil.VpnIp]*RemoteList),
-		nebulaPort:   nebulaPort,
-		lighthouses:  make(map[iputil.VpnIp]struct{}),
-		staticList:   make(map[iputil.VpnIp]struct{}),
-		interval:     interval,
-		punchConn:    pc,
-		punchBack:    punchBack,
-		punchDelay:   punchDelay,
-		l:            l,
-	}
-
-	if metricsEnabled {
-		h.metrics = newLighthouseMetrics()
-		h.metricHolepunchTx = metrics.GetOrRegisterCounter("messages.tx.holepunch", nil)
-	} else {
-		h.metricHolepunchTx = metrics.NilCounter{}
-	}
-
-	for _, ip := range ips {
-		h.lighthouses[ip] = struct{}{}
-	}
-
-	return &h
-}
-
-func (lh *LightHouse) SetRemoteAllowList(allowList *RemoteAllowList) {
-	lh.Lock()
-	defer lh.Unlock()
-
-	lh.remoteAllowList = allowList
-}
-
-func (lh *LightHouse) SetLocalAllowList(allowList *LocalAllowList) {
-	lh.Lock()
-	defer lh.Unlock()
-
-	lh.localAllowList = allowList
-}
-
-func (lh *LightHouse) ValidateLHStaticEntries() error {
-	for lhIP, _ := range lh.lighthouses {
-		if _, ok := lh.staticList[lhIP]; !ok {
-			return fmt.Errorf("Lighthouse %s does not have a static_host_map entry", lhIP)
-		}
-	}
-	return nil
-}
+// NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
+// addrMap should be nil unless this is during a config reload
+func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
+	amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
+	nebulaPort := uint32(c.GetInt("listen.port", 0))
+	if amLighthouse && nebulaPort == 0 {
+		return nil, util.NewContextualError("lighthouse.am_lighthouse enabled on node but no port number is set in config", nil, nil)
+	}
+
+	ones, _ := myVpnNet.Mask.Size()
+	h := LightHouse{
+		amLighthouse:      amLighthouse,
+		myVpnIp:           iputil.Ip2VpnIp(myVpnNet.IP),
+		myVpnZeros:        iputil.VpnIp(32 - ones),
+		myVpnNet:          myVpnNet,
+		addrMap:           make(map[iputil.VpnIp]*RemoteList),
+		nebulaPort:        nebulaPort,
+		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
+		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
+		punchConn:         pc,
+		punchy:            p,
+		l:                 l,
+	}
+
+	if c.GetBool("stats.lighthouse_metrics", false) {
+		h.metrics = newLighthouseMetrics()
+		h.metricHolepunchTx = metrics.GetOrRegisterCounter("messages.tx.holepunch", nil)
+	} else {
+		h.metricHolepunchTx = metrics.NilCounter{}
+	}
+
+	err := h.reload(c, true)
+	if err != nil {
+		return nil, err
+	}
+
+	c.RegisterReloadCallback(func(c *config.C) {
+		err := h.reload(c, false)
+		switch v := err.(type) {
+		case util.ContextualError:
+			v.Log(l)
+		case error:
+			l.WithError(err).Error("failed to reload lighthouse")
+		}
+	})
+
+	return &h, nil
+}
+
+func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
+	return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList))))
+}
+
+func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
+	return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses))))
+}
+
+func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
+	return (*RemoteAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList))))
+}
+
+func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
+	return (*LocalAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList))))
+}
+
+func (lh *LightHouse) GetUpdateInterval() int64 {
+	return atomic.LoadInt64(&lh.atomicInterval)
+}
+
+func (lh *LightHouse) reload(c *config.C, initial bool) error {
+	if initial || c.HasChanged("lighthouse.interval") {
+		atomic.StoreInt64(&lh.atomicInterval, int64(c.GetInt("lighthouse.interval", 10)))
+
+		if !initial {
+			lh.l.Infof("lighthouse.interval changed to %v", lh.atomicInterval)
+
+			if lh.updateCancel != nil {
+				// May not always have a running routine
+				lh.updateCancel()
+			}
+
+			lh.LhUpdateWorker(lh.updateParentCtx, lh.updateUdp)
+		}
+	}
+
+	if initial || c.HasChanged("lighthouse.remote_allow_list") || c.HasChanged("lighthouse.remote_allow_ranges") {
+		ral, err := NewRemoteAllowListFromConfig(c, "lighthouse.remote_allow_list", "lighthouse.remote_allow_ranges")
+		if err != nil {
+			return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
+		}
+
+		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)), unsafe.Pointer(ral))
+		if !initial {
+			//TODO: a diff will be annoyingly difficult
+			lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed")
+		}
+	}
+
+	if initial || c.HasChanged("lighthouse.local_allow_list") {
+		lal, err := NewLocalAllowListFromConfig(c, "lighthouse.local_allow_list")
+		if err != nil {
+			return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
+		}
+
+		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)), unsafe.Pointer(lal))
+		if !initial {
+			//TODO: a diff will be annoyingly difficult
+			lh.l.Info("lighthouse.local_allow_list has changed")
+		}
+	}
+
+	//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
+	if initial || c.HasChanged("static_host_map") {
+		staticList := make(map[iputil.VpnIp]struct{})
+		err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
+		if err != nil {
+			return err
+		}
+
+		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)), unsafe.Pointer(&staticList))
+		if !initial {
+			//TODO: we should remove any remote list entries for static hosts that were removed/modified?
+			lh.l.Info("static_host_map has changed")
+		}
+	}
+
+	if initial || c.HasChanged("lighthouse.hosts") {
+		lhMap := make(map[iputil.VpnIp]struct{})
+		err := lh.parseLighthouses(c, lh.myVpnNet, lhMap)
+		if err != nil {
+			return err
+		}
+
+		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)), unsafe.Pointer(&lhMap))
+		if !initial {
+			//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
+			lh.l.Info("lighthouse.hosts has changed")
+		}
+	}
+
+	return nil
+}
+
+func (lh *LightHouse) parseLighthouses(c *config.C, tunCidr *net.IPNet, lhMap map[iputil.VpnIp]struct{}) error {
+	lhs := c.GetStringSlice("lighthouse.hosts", []string{})
+	if lh.amLighthouse && len(lhs) != 0 {
+		lh.l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
+	}
+
+	for i, host := range lhs {
+		ip := net.ParseIP(host)
+		if ip == nil {
+			return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, nil)
+		}
+		if !tunCidr.Contains(ip) {
+			return util.NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": tunCidr.String()}, nil)
+		}
+		lhMap[iputil.Ip2VpnIp(ip)] = struct{}{}
+	}
+
+	if !lh.amLighthouse && len(lhMap) == 0 {
+		lh.l.Warn("No lighthouse.hosts configured, this host will only be able to initiate tunnels with static_host_map entries")
+	}
+
+	staticList := lh.GetStaticHostList()
+	for lhIP, _ := range lhMap {
+		if _, ok := staticList[lhIP]; !ok {
+			return fmt.Errorf("lighthouse %s does not have a static_host_map entry", lhIP)
+		}
+	}
+
+	return nil
+}
+
+func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
+	shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
+	i := 0
+
+	for k, v := range shm {
+		rip := net.ParseIP(fmt.Sprintf("%v", k))
+		if rip == nil {
+			return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, nil)
+		}
+
+		if !tunCidr.Contains(rip) {
+			return util.NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": rip, "network": tunCidr.String(), "entry": i + 1}, nil)
+		}
+
+		vpnIp := iputil.Ip2VpnIp(rip)
+		vals, ok := v.([]interface{})
+		if ok {
+			for _, v := range vals {
+				ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
+				if err != nil {
+					return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
+				}
+				lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
+			}
+		} else {
+			ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
+			if err != nil {
+				return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
+			}
+			lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
+		}
+		i++
+	}
+
+	return nil
+}
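
Everything above hinges on one idiom: fields named atomic* are only ever read through atomic.LoadPointer and replaced wholesale through atomic.StorePointer, so hot-path readers (QueryServer, IsLighthouseIP, the handshake path) never contend on lh's mutex, and reload publishes a fully built map in a single store. Callers such as CloseAllTunnels hoist one snapshot into a local so a whole sweep sees one generation of the map. A self-contained sketch of the same copy-on-write pattern, with the pointer field spelled explicitly rather than behind Nebula's typed-field casts (names here are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type registry struct {
	hostsPtr unsafe.Pointer // holds a *map[string]struct{}, swapped on reload
}

func (r *registry) hosts() map[string]struct{} {
	p := (*map[string]struct{})(atomic.LoadPointer(&r.hostsPtr))
	if p == nil {
		return nil
	}
	return *p
}

func (r *registry) replace(m map[string]struct{}) {
	// Never mutate the published map; swap in a fully built replacement so
	// concurrent readers see either the old generation or the new one.
	atomic.StorePointer(&r.hostsPtr, unsafe.Pointer(&m))
}

func main() {
	r := &registry{}
	r.replace(map[string]struct{}{"10.128.0.2": {}})

	// Hot paths take one snapshot and use it for the whole operation.
	snapshot := r.hosts()
	_, ok := snapshot["10.128.0.2"]
	fmt.Println(ok) // true
}
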
@@ -146,10 +312,11 @@ func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f udp.EncWriter) {
 		return
 	}

-	lh.metricTx(NebulaMeta_HostQuery, int64(len(lh.lighthouses)))
+	lighthouses := lh.GetLighthouses()
+	lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
 	nb := make([]byte, 12, 12)
 	out := make([]byte, mtu)

-	for n := range lh.lighthouses {
+	for n := range lighthouses {
 		f.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
 	}
 }
@@ -197,7 +364,7 @@ func (lh *LightHouse) queryAndPrepMessage(vpnIp iputil.VpnIp, f func(*cache) (in
 func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
 	// First we check the static mapping
 	// and do nothing if it is there
-	if _, ok := lh.staticList[vpnIp]; ok {
+	if _, ok := lh.GetStaticHostList()[vpnIp]; ok {
 		return
 	}
 	lh.Lock()
@@ -211,10 +378,11 @@ func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
 	lh.Unlock()
 }

-// AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
+// addStaticRemote adds a static host entry for vpnIp as ourselves as the owner
 // We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
 // And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
-func (lh *LightHouse) AddStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr) {
+//NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
+func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) {
 	lh.Lock()
 	am := lh.unlockedGetRemoteList(vpnIp)
 	am.Lock()
@@ -236,8 +404,8 @@ func (lh *LightHouse) AddStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr) {
 		am.unlockedPrependV6(lh.myVpnIp, to)
 	}

-	// Mark it as static
-	lh.staticList[vpnIp] = struct{}{}
+	// Mark it as static in the caller provided map
+	staticList[vpnIp] = struct{}{}
 }

 // unlockedGetRemoteList assumes you have the lh lock
@@ -252,7 +420,7 @@ func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
 // unlockedShouldAddV4 checks if to is allowed by our allow list
 func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
-	allow := lh.remoteAllowList.AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
+	allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
 	if lh.l.Level >= logrus.TraceLevel {
 		lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
 	}
@@ -266,7 +434,7 @@ func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bo
 // unlockedShouldAddV6 checks if to is allowed by our allow list
 func (lh *LightHouse) unlockedShouldAddV6(vpnIp iputil.VpnIp, to *Ip6AndPort) bool {
-	allow := lh.remoteAllowList.AllowIpV6(vpnIp, to.Hi, to.Lo)
+	allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, to.Hi, to.Lo)
 	if lh.l.Level >= logrus.TraceLevel {
 		lh.l.WithField("remoteIp", lhIp6ToIp(to)).WithField("allow", allow).Trace("remoteAllowList.Allow")
 	}
@@ -287,7 +455,7 @@ func lhIp6ToIp(v *Ip6AndPort) net.IP {
 }

 func (lh *LightHouse) IsLighthouseIP(vpnIp iputil.VpnIp) bool {
-	if _, ok := lh.lighthouses[vpnIp]; ok {
+	if _, ok := lh.GetLighthouses()[vpnIp]; ok {
 		return true
 	}
 	return false
@@ -329,18 +497,24 @@ func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
 }

 func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) {
-	if lh.amLighthouse || lh.interval == 0 {
+	lh.updateParentCtx = ctx
+	lh.updateUdp = f
+
+	interval := lh.GetUpdateInterval()
+	if lh.amLighthouse || interval == 0 {
 		return
 	}

-	clockSource := time.NewTicker(time.Second * time.Duration(lh.interval))
+	clockSource := time.NewTicker(time.Second * time.Duration(interval))
+	updateCtx, cancel := context.WithCancel(ctx)
+	lh.updateCancel = cancel
 	defer clockSource.Stop()

 	for {
 		lh.SendUpdate(f)

 		select {
-		case <-ctx.Done():
+		case <-updateCtx.Done():
 			return
 		case <-clockSource.C:
 			continue
@@ -352,7 +526,8 @@ func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
 	var v4 []*Ip4AndPort
 	var v6 []*Ip6AndPort

-	for _, e := range *localIps(lh.l, lh.localAllowList) {
+	lal := lh.GetLocalAllowList()
+	for _, e := range *localIps(lh.l, lal) {
 		if ip4 := e.To4(); ip4 != nil && ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.Ip2VpnIp(ip4)) {
 			continue
 		}
@@ -373,7 +548,8 @@ func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
 		},
 	}

-	lh.metricTx(NebulaMeta_HostUpdateNotification, int64(len(lh.lighthouses)))
+	lighthouses := lh.GetLighthouses()
+	lh.metricTx(NebulaMeta_HostUpdateNotification, int64(len(lighthouses)))
 	nb := make([]byte, 12, 12)
 	out := make([]byte, mtu)
@@ -383,7 +559,7 @@ func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
 		return
 	}

-	for vpnIp := range lh.lighthouses {
+	for vpnIp := range lighthouses {
 		f.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
 	}
 }
@@ -609,7 +785,7 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp i
 	}

 	go func() {
-		time.Sleep(lhh.lh.punchDelay)
+		time.Sleep(lhh.lh.punchy.GetDelay())
 		lhh.lh.metricHolepunchTx.Inc(1)
 		lhh.lh.punchConn.WriteTo(empty, vpnPeer)
 	}()
@@ -631,7 +807,7 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp i
 	// This sends a nebula test packet to the host trying to contact us. In the case
 	// of a double nat or other difficult scenario, this may help establish
 	// a tunnel.
-	if lhh.lh.punchBack {
+	if lhh.lh.punchy.GetRespond() {
 		queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
 		go func() {
 			time.Sleep(time.Second * 5)
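
LhUpdateWorker now derives a cancelable context and parks its CancelFunc on the LightHouse, which is what lets a reload of lighthouse.interval stop the old ticker loop and start a fresh one. A compressed, illustrative sketch of that cancel-and-restart shape (here the caller keeps the CancelFunc; in the diff the worker stores it on the struct itself):

package main

import (
	"context"
	"fmt"
	"time"
)

// startWorker mimics LhUpdateWorker's shape: send an update, then wait on
// either the cancelable context or the ticker. Returning the CancelFunc is
// the analogue of reload stashing lh.updateCancel.
func startWorker(ctx context.Context, interval time.Duration) context.CancelFunc {
	updateCtx, cancel := context.WithCancel(ctx)
	go func() {
		clock := time.NewTicker(interval)
		defer clock.Stop()
		for {
			fmt.Println("SendUpdate at interval", interval)
			select {
			case <-updateCtx.Done():
				return
			case <-clock.C:
			}
		}
	}()
	return cancel
}

func main() {
	ctx := context.Background()

	cancel := startWorker(ctx, 500*time.Millisecond)
	time.Sleep(1200 * time.Millisecond)

	// A reload that changes the interval cancels the old routine and starts
	// a new one with the fresh value.
	cancel()
	cancel = startWorker(ctx, 200*time.Millisecond)
	time.Sleep(700 * time.Millisecond)
	cancel()
}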


@@ -6,6 +6,7 @@ import (
 	"testing"

 	"github.com/golang/protobuf/proto"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
 	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/test"
@@ -47,33 +48,32 @@ func TestNewLhQuery(t *testing.T) {
 func Test_lhStaticMapping(t *testing.T) {
 	l := test.NewLogger()
+	_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
 	lh1 := "10.128.0.2"
-	lh1IP := net.ParseIP(lh1)
-
-	udpServer, _ := udp.NewListener(l, "0.0.0.0", 0, true, 2)
-
-	meh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 255}}, []iputil.VpnIp{iputil.Ip2VpnIp(lh1IP)}, 10, 10003, udpServer, false, 1, false)
-	meh.AddStaticRemote(iputil.Ip2VpnIp(lh1IP), udp.NewAddr(lh1IP, uint16(4242)))
-	err := meh.ValidateLHStaticEntries()
+
+	c := config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
+	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
+
+	_, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
 	assert.Nil(t, err)

 	lh2 := "10.128.0.3"
-	lh2IP := net.ParseIP(lh2)
-
-	meh = NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 255}}, []iputil.VpnIp{iputil.Ip2VpnIp(lh1IP), iputil.Ip2VpnIp(lh2IP)}, 10, 10003, udpServer, false, 1, false)
-	meh.AddStaticRemote(iputil.Ip2VpnIp(lh1IP), udp.NewAddr(lh1IP, uint16(4242)))
-	err = meh.ValidateLHStaticEntries()
-	assert.EqualError(t, err, "Lighthouse 10.128.0.3 does not have a static_host_map entry")
+	c = config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
+	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
+
+	_, err = NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
 }

 func BenchmarkLighthouseHandleRequest(b *testing.B) {
 	l := test.NewLogger()
-	lh1 := "10.128.0.2"
-	lh1IP := net.ParseIP(lh1)
-
-	udpServer, _ := udp.NewListener(l, "0.0.0.0", 0, true, 2)
-
-	lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{0, 0, 0, 0}}, []iputil.VpnIp{iputil.Ip2VpnIp(lh1IP)}, 10, 10003, udpServer, false, 1, false)
+	_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
+
+	c := config.NewC(l)
+	lh, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	if !assert.NoError(b, err) {
+		b.Fatal()
+	}

 	hAddr := udp.NewAddrFromString("4.5.6.7:12345")
 	hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
@@ -160,8 +160,11 @@ func TestLighthouse_Memory(t *testing.T) {
 	theirUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.3"), Port: 4242}
 	theirVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.3"))

-	udpServer, _ := udp.NewListener(l, "0.0.0.0", 0, true, 2)
-	lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, []iputil.VpnIp{}, 10, 10003, udpServer, false, 1, false)
+	c := config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
+	c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
+	lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
+	assert.NoError(t, err)
 	lhh := lh.NewRequestHandler()

 	// Test that my first update responds with just that
@@ -179,9 +182,16 @@ func TestLighthouse_Memory(t *testing.T) {
 	r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
 	assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)

-	// Update a different host
+	// Update a different host and ask about it
 	newLHHostUpdate(theirUdpAddr0, theirVpnIp, []*udp.Addr{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh)
+	r = newLHHostRequest(theirUdpAddr0, theirVpnIp, theirVpnIp, lhh)
+	assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
+
+	// Have both hosts ask about the other
 	r = newLHHostRequest(theirUdpAddr0, theirVpnIp, myVpnIp, lhh)
+	assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
+
+	r = newLHHostRequest(myUdpAddr0, myVpnIp, theirVpnIp, lhh)
 	assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)

 	// Make sure we didn't get changed
@@ -224,6 +234,18 @@ func TestLighthouse_Memory(t *testing.T) {
 	assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good)
 }

+func TestLighthouse_reload(t *testing.T) {
+	l := test.NewLogger()
+	c := config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
+	c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
+	lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
+	assert.NoError(t, err)
+
+	c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}}
+	lh.reload(c, false)
+}
+
 func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
 	req := &NebulaMeta{
 		Type: NebulaMeta_HostQuery,
@@ -237,7 +259,10 @@ func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh
 		panic(err)
 	}

-	w := &testEncWriter{}
+	filter := NebulaMeta_HostQueryReply
+	w := &testEncWriter{
+		metaFilter: &filter,
+	}
 	lhh.HandleRequest(fromAddr, myVpnIp, b, w)
 	return w.lastReply
 }
@@ -344,18 +369,22 @@ type testLhReply struct {
 }

 type testEncWriter struct {
 	lastReply  testLhReply
+	metaFilter *NebulaMeta_MessageType
 }

 func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
-	tw.lastReply = testLhReply{
-		nebType:    t,
-		nebSubType: st,
-		vpnIp:      vpnIp,
-		msg:        &NebulaMeta{},
-	}
-
-	err := proto.Unmarshal(p, tw.lastReply.msg)
+	msg := &NebulaMeta{}
+	err := proto.Unmarshal(p, msg)
+	if tw.metaFilter == nil || msg.Type == *tw.metaFilter {
+		tw.lastReply = testLhReply{
+			nebType:    t,
+			nebSubType: st,
+			vpnIp:      vpnIp,
+			msg:        msg,
+		}
+	}
+
 	if err != nil {
 		panic(err)
 	}
@@ -363,7 +392,10 @@ func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.Mess
 // assertIp4InArray asserts every address in want is at the same position in have and that the lengths match
 func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...*udp.Addr) {
-	assert.Len(t, have, len(want))
+	if !assert.Len(t, have, len(want)) {
+		return
+	}
+
 	for k, w := range want {
 		if !(have[k].Ip == uint32(iputil.Ip2VpnIp(w.IP)) && have[k].Port == uint32(w.Port)) {
 			assert.Fail(t, fmt.Sprintf("Response did not contain: %v:%v at %v; %v", w.IP, w.Port, k, translateV4toUdpAddr(have)))
@@ -373,7 +405,10 @@ func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...*udp.Addr) {
 // assertUdpAddrInArray asserts every address in want is at the same position in have and that the lengths match
 func assertUdpAddrInArray(t *testing.T, have []*udp.Addr, want ...*udp.Addr) {
-	assert.Len(t, have, len(want))
+	if !assert.Len(t, have, len(want)) {
+		return
+	}
+
 	for k, w := range want {
 		if !(have[k].IP.Equal(w.IP) && have[k].Port == w.Port) {
 			assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v; %v", w, k, have))
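
The testEncWriter change deserves a note: now that both hosts query each other, HandleRequest can emit more than one message through the same writer (apparently a punch notification as well as the HostQueryReply), so lastReply only records messages matching an optional type filter. The same record-if-it-matches shape, reduced to a toy with illustrative names:

package main

import "fmt"

// recorder keeps the last event that passes an optional filter, mirroring
// testEncWriter.metaFilter in the diff (nil means record everything).
type recorder struct {
	filter *string
	last   string
}

func (r *recorder) send(kind, payload string) {
	if r.filter == nil || kind == *r.filter {
		r.last = fmt.Sprintf("%s: %s", kind, payload)
	}
}

func main() {
	want := "HostQueryReply"
	r := &recorder{filter: &want}

	r.send("HostPunchNotification", "punch 1.2.3.4:4242") // ignored
	r.send("HostQueryReply", "addrs for 10.128.0.3")      // recorded

	fmt.Println(r.last) // HostQueryReply: addrs for 10.128.0.3
}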

main.go

@@ -3,13 +3,13 @@ package nebula
 import (
 	"context"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"time"

 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/config"
-	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/overlay"
 	"github.com/slackhq/nebula/sshd"
 	"github.com/slackhq/nebula/udp"
@@ -218,95 +218,18 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 	go hostMap.Promoter(config.GetInt("promoter.interval"))
 	*/

-	punchy := NewPunchyFromConfig(c)
-	if punchy.Punch && !configTest {
+	punchy := NewPunchyFromConfig(l, c)
+	if punchy.GetPunch() && !configTest {
 		l.Info("UDP hole punching enabled")
 		go hostMap.Punchy(ctx, udpConns[0])
 	}

-	amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
-
-	// fatal if am_lighthouse is enabled but we are using an ephemeral port
-	if amLighthouse && (c.GetInt("listen.port", 0) == 0) {
-		return nil, util.NewContextualError("lighthouse.am_lighthouse enabled on node but no port number is set in config", nil, nil)
-	}
-
-	// warn if am_lighthouse is enabled but upstream lighthouses exists
-	rawLighthouseHosts := c.GetStringSlice("lighthouse.hosts", []string{})
-	if amLighthouse && len(rawLighthouseHosts) != 0 {
-		l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
-	}
-
-	lighthouseHosts := make([]iputil.VpnIp, len(rawLighthouseHosts))
-	for i, host := range rawLighthouseHosts {
-		ip := net.ParseIP(host)
-		if ip == nil {
-			return nil, util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, nil)
-		}
-		if !tunCidr.Contains(ip) {
-			return nil, util.NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": tunCidr.String()}, nil)
-		}
-		lighthouseHosts[i] = iputil.Ip2VpnIp(ip)
-	}
-
-	if !amLighthouse && len(lighthouseHosts) == 0 {
-		l.Warn("No lighthouses.hosts configured, this host will only be able to initiate tunnels with static_host_map entries")
-	}
-
-	lightHouse := NewLightHouse(
-		l,
-		amLighthouse,
-		tunCidr,
-		lighthouseHosts,
-		//TODO: change to a duration
-		c.GetInt("lighthouse.interval", 10),
-		uint32(port),
-		udpConns[0],
-		punchy.Respond,
-		punchy.Delay,
-		c.GetBool("stats.lighthouse_metrics", false),
-	)
-
-	remoteAllowList, err := NewRemoteAllowListFromConfig(c, "lighthouse.remote_allow_list", "lighthouse.remote_allow_ranges")
-	if err != nil {
-		return nil, util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
-	}
-	lightHouse.SetRemoteAllowList(remoteAllowList)
-
-	localAllowList, err := NewLocalAllowListFromConfig(c, "lighthouse.local_allow_list")
-	if err != nil {
-		return nil, util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
-	}
-	lightHouse.SetLocalAllowList(localAllowList)
-
-	//TODO: Move all of this inside functions in lighthouse.go
-	for k, v := range c.GetMap("static_host_map", map[interface{}]interface{}{}) {
-		ip := net.ParseIP(fmt.Sprintf("%v", k))
-		vpnIp := iputil.Ip2VpnIp(ip)
-		if !tunCidr.Contains(ip) {
-			return nil, util.NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": vpnIp, "network": tunCidr.String()}, nil)
-		}
-		vals, ok := v.([]interface{})
-		if ok {
-			for _, v := range vals {
-				ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
-				if err != nil {
-					return nil, util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp}, err)
-				}
-				lightHouse.AddStaticRemote(vpnIp, udp.NewAddr(ip, port))
-			}
-		} else {
-			ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
-			if err != nil {
-				return nil, util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp}, err)
-			}
-			lightHouse.AddStaticRemote(vpnIp, udp.NewAddr(ip, port))
-		}
-	}
-
-	err = lightHouse.ValidateLHStaticEntries()
-	if err != nil {
-		l.WithError(err).Error("Lighthouse unreachable")
+	lightHouse, err := NewLightHouseFromConfig(l, c, tunCidr, udpConns[0], punchy)
+	switch {
+	case errors.As(err, &util.ContextualError{}):
+		return nil, err
+	case err != nil:
+		return nil, util.NewContextualError("Failed to initialize lighthouse handler", nil, err)
 	}

 	var messageMetrics *MessageMetrics
@@ -411,7 +334,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 	// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
 	var dnsStart func()
-	if amLighthouse && serveDns {
+	if lightHouse.amLighthouse && serveDns {
 		l.Debugln("Starting dns server")
 		dnsStart = dnsMain(l, hostMap, c)
 	}
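
The switch replaces roughly eighty lines of inline lighthouse setup. Its errors.As guard keeps already-contextual errors intact and wraps everything else exactly once. A stand-alone sketch of that pattern with a stand-in error type (util.ContextualError's real fields are not shown in this diff, so the type here is illustrative):

package main

import (
	"errors"
	"fmt"
)

// ContextualError mimics the role of util.ContextualError: an error that
// already carries enough context to be logged or returned as-is.
type ContextualError struct{ Msg string }

func (c ContextualError) Error() string { return c.Msg }

func build() error {
	return ContextualError{Msg: "lighthouse.am_lighthouse enabled on node but no port number is set in config"}
}

func main() {
	err := build()
	switch {
	case errors.As(err, &ContextualError{}):
		fmt.Println("already contextual:", err) // pass through unchanged
	case err != nil:
		fmt.Println("wrap once:", fmt.Errorf("failed to initialize lighthouse handler: %w", err))
	}
}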


@@ -157,7 +157,7 @@ func (f *Interface) sendCloseTunnel(h *HostInfo) {
 func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udp.Addr) {
 	if !hostinfo.remote.Equals(addr) {
-		if !f.lightHouse.remoteAllowList.Allow(hostinfo.vpnIp, addr.IP) {
+		if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
 			hostinfo.logger(f.l).WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming")
 			return
 		}


@@ -1,34 +1,89 @@
 package nebula

 import (
+	"sync/atomic"
 	"time"

+	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/config"
 )

 type Punchy struct {
-	Punch   bool
-	Respond bool
-	Delay   time.Duration
+	atomicPunch   int32
+	atomicRespond int32
+	atomicDelay   time.Duration
+	l             *logrus.Logger
 }

-func NewPunchyFromConfig(c *config.C) *Punchy {
-	p := &Punchy{}
-
-	if c.IsSet("punchy.punch") {
-		p.Punch = c.GetBool("punchy.punch", false)
-	} else {
-		// Deprecated fallback
-		p.Punch = c.GetBool("punchy", false)
-	}
-
-	if c.IsSet("punchy.respond") {
-		p.Respond = c.GetBool("punchy.respond", false)
-	} else {
-		// Deprecated fallback
-		p.Respond = c.GetBool("punch_back", false)
-	}
-
-	p.Delay = c.GetDuration("punchy.delay", time.Second)
-
+func NewPunchyFromConfig(l *logrus.Logger, c *config.C) *Punchy {
+	p := &Punchy{l: l}
+	p.reload(c, true)
+	c.RegisterReloadCallback(func(c *config.C) {
+		p.reload(c, false)
+	})
 	return p
 }

+func (p *Punchy) reload(c *config.C, initial bool) {
+	if initial {
+		var yes bool
+		if c.IsSet("punchy.punch") {
+			yes = c.GetBool("punchy.punch", false)
+		} else {
+			// Deprecated fallback
+			yes = c.GetBool("punchy", false)
+		}
+
+		if yes {
+			atomic.StoreInt32(&p.atomicPunch, 1)
+		} else {
+			atomic.StoreInt32(&p.atomicPunch, 0)
+		}
+
+	} else if c.HasChanged("punchy.punch") || c.HasChanged("punchy") {
+		//TODO: it should be relatively easy to support this, just need to be able to cancel the goroutine and boot it up from here
+		p.l.Warn("Changing punchy.punch with reload is not supported, ignoring.")
+	}
+
+	if initial || c.HasChanged("punchy.respond") || c.HasChanged("punch_back") {
+		var yes bool
+		if c.IsSet("punchy.respond") {
+			yes = c.GetBool("punchy.respond", false)
+		} else {
+			// Deprecated fallback
+			yes = c.GetBool("punch_back", false)
+		}
+
+		if yes {
+			atomic.StoreInt32(&p.atomicRespond, 1)
+		} else {
+			atomic.StoreInt32(&p.atomicRespond, 0)
+		}
+
+		if !initial {
+			p.l.Infof("punchy.respond changed to %v", p.GetRespond())
+		}
+	}
+
+	//NOTE: this will not apply to any in progress operations, only the next one
+	if initial || c.HasChanged("punchy.delay") {
+		atomic.StoreInt64((*int64)(&p.atomicDelay), (int64)(c.GetDuration("punchy.delay", time.Second)))
+		if !initial {
+			p.l.Infof("punchy.delay changed to %s", p.GetDelay())
+		}
+	}
+}
+
+func (p *Punchy) GetPunch() bool {
+	return atomic.LoadInt32(&p.atomicPunch) == 1
+}
+
+func (p *Punchy) GetRespond() bool {
+	return atomic.LoadInt32(&p.atomicRespond) == 1
+}
+
+func (p *Punchy) GetDelay() time.Duration {
+	return (time.Duration)(atomic.LoadInt64((*int64)(&p.atomicDelay)))
+}
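
Punchy's fields become atomics so GetRespond and GetDelay can be read from packet-handling goroutines while a reload rewrites them; before Go 1.19's sync/atomic.Bool, a 0/1 int32 was the usual spelling for an atomic flag. A minimal sketch of both tricks — note that the diff stores the duration in a time.Duration field and casts to *int64, which leans on 64-bit alignment; this sketch just uses int64 directly:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type punchy struct {
	respond int32 // 0 or 1, accessed only through atomic ops
	delay   int64 // a time.Duration stored as its int64 representation
}

func (p *punchy) setRespond(yes bool) {
	var v int32
	if yes {
		v = 1
	}
	atomic.StoreInt32(&p.respond, v)
}

func (p *punchy) getRespond() bool { return atomic.LoadInt32(&p.respond) == 1 }

func (p *punchy) setDelay(d time.Duration) { atomic.StoreInt64(&p.delay, int64(d)) }
func (p *punchy) getDelay() time.Duration  { return time.Duration(atomic.LoadInt64(&p.delay)) }

func main() {
	p := &punchy{}
	p.setRespond(true)
	p.setDelay(5 * time.Second)
	fmt.Println(p.getRespond(), p.getDelay()) // true 5s
}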


@@ -14,34 +14,58 @@ func TestNewPunchyFromConfig(t *testing.T) {
 	c := config.NewC(l)

 	// Test defaults
-	p := NewPunchyFromConfig(c)
-	assert.Equal(t, false, p.Punch)
-	assert.Equal(t, false, p.Respond)
-	assert.Equal(t, time.Second, p.Delay)
+	p := NewPunchyFromConfig(l, c)
+	assert.Equal(t, false, p.GetPunch())
+	assert.Equal(t, false, p.GetRespond())
+	assert.Equal(t, time.Second, p.GetDelay())

 	// punchy deprecation
 	c.Settings["punchy"] = true
-	p = NewPunchyFromConfig(c)
-	assert.Equal(t, true, p.Punch)
+	p = NewPunchyFromConfig(l, c)
+	assert.Equal(t, true, p.GetPunch())

 	// punchy.punch
 	c.Settings["punchy"] = map[interface{}]interface{}{"punch": true}
-	p = NewPunchyFromConfig(c)
-	assert.Equal(t, true, p.Punch)
+	p = NewPunchyFromConfig(l, c)
+	assert.Equal(t, true, p.GetPunch())

 	// punch_back deprecation
 	c.Settings["punch_back"] = true
-	p = NewPunchyFromConfig(c)
-	assert.Equal(t, true, p.Respond)
+	p = NewPunchyFromConfig(l, c)
+	assert.Equal(t, true, p.GetRespond())

 	// punchy.respond
 	c.Settings["punchy"] = map[interface{}]interface{}{"respond": true}
 	c.Settings["punch_back"] = false
-	p = NewPunchyFromConfig(c)
-	assert.Equal(t, true, p.Respond)
+	p = NewPunchyFromConfig(l, c)
+	assert.Equal(t, true, p.GetRespond())

 	// punchy.delay
 	c.Settings["punchy"] = map[interface{}]interface{}{"delay": "1m"}
-	p = NewPunchyFromConfig(c)
-	assert.Equal(t, time.Minute, p.Delay)
+	p = NewPunchyFromConfig(l, c)
+	assert.Equal(t, time.Minute, p.GetDelay())
+}
+
+func TestPunchy_reload(t *testing.T) {
+	l := test.NewLogger()
+	c := config.NewC(l)
+
+	delay, _ := time.ParseDuration("1m")
+	assert.NoError(t, c.LoadString(`
+punchy:
+  delay: 1m
+  respond: false
+`))
+	p := NewPunchyFromConfig(l, c)
+	assert.Equal(t, delay, p.GetDelay())
+	assert.Equal(t, false, p.GetRespond())
+
+	newDelay, _ := time.ParseDuration("10m")
+	assert.NoError(t, c.ReloadConfigString(`
+punchy:
+  delay: 10m
+  respond: true
+`))
+	p.reload(c, false)
+	assert.Equal(t, newDelay, p.GetDelay())
+	assert.Equal(t, true, p.GetRespond())
 }
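
Taken together, a running node now re-reads punchy.respond, punchy.delay, the lighthouse allow lists, lighthouse.hosts, static_host_map, and lighthouse.interval on SIGHUP, because each component registers a reload callback at construction time. A hedged sketch of the wiring, assuming config's file-based Load(path) entry point (only CatchHUP, NewC, and RegisterReloadCallback appear in this diff; the path is illustrative):

package main

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	l := logrus.New()
	c := config.NewC(l)

	// Assumption: Load reads and parses the yaml at this path.
	if err := c.Load("/etc/nebula/config.yml"); err != nil {
		l.WithError(err).Fatal("failed to load config")
	}

	// Components register themselves at construction time; for example,
	// NewPunchyFromConfig and NewLightHouseFromConfig both do this internally.
	c.RegisterReloadCallback(func(c *config.C) {
		l.Info("config reloaded")
	})

	// CatchHUP re-runs ReloadConfig, and therefore every callback, on SIGHUP.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go c.CatchHUP(ctx)

	select {} // a real node would run its main loop here
}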