update to work with the latest locks

Wade Simmons 2023-12-18 21:01:26 -05:00
parent fdb78044ba
commit 5ce8279875


@ -5,9 +5,7 @@ package nebula
 import (
 	"fmt"
-	"log"
 	"runtime"
-	"runtime/debug"
 	"sync"
 
 	"github.com/timandy/routine"
 )
@ -23,6 +21,13 @@ const (
 	mutexKeyTypeHandshakeManager = "handshake-manager"
 )
 
+// For each Key in this map, the Value is a list of lock types you can already have
+// when you want to grab that Key. This ensures that locks are always fetched
+// in the same order, to prevent deadlocks.
+var allowedConcurrentLocks = map[mutexKeyType][]mutexKeyType{
+	mutexKeyTypeHandshakeManager: {mutexKeyTypeHostMap},
+}
+
 type mutexKey struct {
 	Type mutexKeyType
 	ID   uint32
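
The table is directional: a goroutine that already holds the hostmap lock may grab the handshake-manager lock, but not the reverse, so every code path converges on one global acquisition order. A minimal standalone sketch of that reading (type and key names mirror the diff; the "hostmap" constant value and the allowed helper are assumptions for illustration):

package main

import "fmt"

type mutexKeyType string

const (
	mutexKeyTypeHostMap          mutexKeyType = "hostmap" // assumed value, not shown in the hunk
	mutexKeyTypeHandshakeManager mutexKeyType = "handshake-manager"
)

var allowedConcurrentLocks = map[mutexKeyType][]mutexKeyType{
	mutexKeyTypeHandshakeManager: {mutexKeyTypeHostMap},
}

// allowed (hypothetical helper) reports whether grabbing next while
// already holding held respects the table.
func allowed(held, next mutexKeyType) bool {
	for _, a := range allowedConcurrentLocks[next] {
		if a == held {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed(mutexKeyTypeHostMap, mutexKeyTypeHandshakeManager)) // true: listed in the table
	fmt.Println(allowed(mutexKeyTypeHandshakeManager, mutexKeyTypeHostMap)) // false: inverted order
}

checkMutex in the next hunk enforces exactly this lookup against the set of locks the goroutine already holds.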
@ -45,37 +50,30 @@ func newSyncRWMutex(key mutexKey) syncRWMutex {
 }
 
 func alertMutex(err error) {
-	log.Print(err, string(debug.Stack()))
+	panic(err)
+	// NOTE: you could switch to this log line and remove the panic if you want
+	// to log all failures instead of panicking on the first one
+	//log.Print(err, string(debug.Stack()))
 }
 
 func checkMutex(state map[mutexKey]mutexValue, add mutexKey) {
-	for k := range state {
+	allowedConcurrent := allowedConcurrentLocks[add.Type]
+
+	for k, v := range state {
 		if add == k {
-			alertMutex(fmt.Errorf("re-entrant lock: state=%v add=%v", state, add))
+			alertMutex(fmt.Errorf("re-entrant lock: %s. previous allocation: %s", add, v))
 		}
-	}
 
-	switch add.Type {
-	case mutexKeyTypeHostInfo:
-		// Check for any other hostinfo keys:
-		for k := range state {
-			if k.Type == mutexKeyTypeHostInfo {
-				alertMutex(fmt.Errorf("grabbing hostinfo lock and already have a hostinfo lock: state=%v add=%v", state, add))
-			}
-		}
-		if _, ok := state[mutexKey{Type: mutexKeyTypeHostMap}]; ok {
-			alertMutex(fmt.Errorf("grabbing hostinfo lock and already have hostmap: state=%v add=%v", state, add))
-		}
-		if _, ok := state[mutexKey{Type: mutexKeyTypeHandshakeManager}]; ok {
-			alertMutex(fmt.Errorf("grabbing hostinfo lock and already have handshake-manager: state=%v add=%v", state, add))
-		}
-	// case mutexKeyTypeHandshakeManager:
-	// 	if _, ok := state[mutexKey{Type: mutexKeyTypeHostMap}]; ok {
-	// 		alertMutex(fmt.Errorf("grabbing handshake-manager lock and already have hostmap: state=%v add=%v", state, add))
-	// 	}
-	case mutexKeyTypeHostMap:
-		if _, ok := state[mutexKey{Type: mutexKeyTypeHandshakeManager}]; ok {
-			alertMutex(fmt.Errorf("grabbing hostmap lock and already have handshake-manager: state=%v add=%v", state, add))
+		// TODO: use slices.Contains, but it requires go1.21
+		var found bool
+		for _, a := range allowedConcurrent {
+			if a == k.Type {
+				found = true
+				break
+			}
+		}
+		if !found {
+			alertMutex(fmt.Errorf("grabbing %s lock and already have these locks: %s", add.Type, state))
 		}
 	}
 }
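
To see the rewritten check end to end, here is a hedged standalone sketch. It reuses the names from the diff but substitutes trimmed-down types, invented file/line values, and an error return in place of alertMutex, which in the real code now panics on the first violation:

package main

import "fmt"

type mutexKeyType string

const (
	mutexKeyTypeHostMap          mutexKeyType = "hostmap"
	mutexKeyTypeHandshakeManager mutexKeyType = "handshake-manager"
)

var allowedConcurrentLocks = map[mutexKeyType][]mutexKeyType{
	mutexKeyTypeHandshakeManager: {mutexKeyTypeHostMap},
}

// Trimmed-down stand-ins for the diff's types.
type mutexKey struct {
	Type mutexKeyType
	ID   uint32
}

type mutexValue struct {
	file string
	line int
}

// checkMutex follows the hunk above, but returns the error instead of
// calling alertMutex so the example can print both outcomes.
func checkMutex(state map[mutexKey]mutexValue, add mutexKey) error {
	allowedConcurrent := allowedConcurrentLocks[add.Type]

	for k, v := range state {
		if add == k {
			return fmt.Errorf("re-entrant lock: %v. previous allocation: %v", add, v)
		}

		var found bool
		for _, a := range allowedConcurrent {
			if a == k.Type {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("grabbing %s lock and already have these locks: %v", add.Type, state)
		}
	}
	return nil
}

func main() {
	// Holding hostmap, then grabbing handshake-manager: listed in the table, so nil.
	held := map[mutexKey]mutexValue{
		{Type: mutexKeyTypeHostMap}: {file: "hostmap.go", line: 42},
	}
	fmt.Println(checkMutex(held, mutexKey{Type: mutexKeyTypeHandshakeManager}))

	// The reverse order is not listed, so the detector fires.
	held = map[mutexKey]mutexValue{
		{Type: mutexKeyTypeHandshakeManager}: {file: "handshake.go", line: 7},
	}
	fmt.Println(checkMutex(held, mutexKey{Type: mutexKeyTypeHostMap}))
}

Panicking in alertMutex surfaces the first bad ordering with a stack trace; the commented-out log.Print line is the documented opt-out for logging every violation instead of stopping at the first.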
@ -109,3 +107,15 @@ func (s *syncRWMutex) RUnlock() {
 	delete(m, s.mutexKey)
 	s.RWMutex.RUnlock()
 }
+
+func (m mutexKey) String() string {
+	if m.ID == 0 {
+		return fmt.Sprintf("%s", m.Type)
+	} else {
+		return fmt.Sprintf("%s(%d)", m.Type, m.ID)
+	}
+}
+
+func (m mutexValue) String() string {
+	return fmt.Sprintf("%s:%d", m.file, m.line)
+}