Update dependencies

bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions


@@ -0,0 +1,445 @@
package locking
import (
"sync/atomic"
"unsafe"
"gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/sync"
)
const (
// ShardOrder is an optional parameter specifying the base-2 log of the
// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
// unnecessary synchronization between unrelated concurrent operations,
// improving performance for write-heavy workloads, but increase memory
// usage for small maps.
ancestorsShardOrder = 0
)
// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
type ancestorsHasher struct {
ancestorsdefaultHasher
}
// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type ancestorsdefaultHasher struct {
fn func(unsafe.Pointer, uintptr) uintptr
seed uintptr
}
// Init initializes the Hasher.
func (h *ancestorsdefaultHasher) Init() {
h.fn = sync.MapKeyHasher(map[*MutexClass]*string(nil))
h.seed = sync.RandUintptr()
}
// Hash returns the hash value for the given Key.
func (h *ancestorsdefaultHasher) Hash(key *MutexClass) uintptr {
return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}
var ancestorshasher ancestorsHasher
func init() {
ancestorshasher.Init()
}
// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
type ancestorsAtomicPtrMap struct {
shards [1 << ancestorsShardOrder]ancestorsapmShard
}
func (m *ancestorsAtomicPtrMap) shard(hash uintptr) *ancestorsapmShard {
// Go defines right shifts >= width of shifted unsigned operand as 0, so
// this is correct even if ShardOrder is 0 (although nogo complains because
// nogo is dumb).
const indexLSB = unsafe.Sizeof(uintptr(0))*8 - ancestorsShardOrder
index := hash >> indexLSB
return (*ancestorsapmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(ancestorsapmShard{}))))
}
type ancestorsapmShard struct {
ancestorsapmShardMutationData
_ [ancestorsapmShardMutationDataPadding]byte
ancestorsapmShardLookupData
_ [ancestorsapmShardLookupDataPadding]byte
}
type ancestorsapmShardMutationData struct {
dirtyMu sync.Mutex // serializes slot transitions out of empty
dirty uintptr // # slots with val != nil
count uintptr // # slots with val != nil and val != tombstone()
rehashMu sync.Mutex // serializes rehashing
}
type ancestorsapmShardLookupData struct {
seq sync.SeqCount // allows atomic reads of slots+mask
slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
}
const (
ancestorscacheLineBytes = 64
// Cache line padding is enabled if sharding is.
ancestorsapmEnablePadding = (ancestorsShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
ancestorsapmShardMutationDataRequiredPadding = ancestorscacheLineBytes - (((unsafe.Sizeof(ancestorsapmShardMutationData{}) - 1) % ancestorscacheLineBytes) + 1)
ancestorsapmShardMutationDataPadding = ancestorsapmEnablePadding * ancestorsapmShardMutationDataRequiredPadding
ancestorsapmShardLookupDataRequiredPadding = ancestorscacheLineBytes - (((unsafe.Sizeof(ancestorsapmShardLookupData{}) - 1) % ancestorscacheLineBytes) + 1)
ancestorsapmShardLookupDataPadding = ancestorsapmEnablePadding * ancestorsapmShardLookupDataRequiredPadding
// These define fractional thresholds for when apmShard.rehash() is called
// (i.e. the load factor) and when it rehashes to a larger table
// respectively. They are chosen such that the rehash threshold = the
// expansion threshold + 1/2, so that when reuse of deleted slots is rare
// or non-existent, rehashing occurs after the insertion of at least 1/2
// the table's size in new entries, which is acceptably infrequent.
ancestorsapmRehashThresholdNum = 2
ancestorsapmRehashThresholdDen = 3
ancestorsapmExpansionThresholdNum = 1
ancestorsapmExpansionThresholdDen = 6
)
type ancestorsapmSlot struct {
// slot states are indicated by val:
//
// * Empty: val == nil; key is meaningless. May transition to full or
// evacuated with dirtyMu locked.
//
// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
// is the Value mapped to key. May transition to deleted or evacuated.
//
// * Deleted: val == tombstone(); key is still immutable. key is mapped to
// no Value. May transition to full or evacuated.
//
// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
// slots that have already been moved, requiring readers to wait for
// rehashing to complete and use the new table. Terminal state.
//
// Note that once val is non-nil, it cannot become nil again. That is, the
// transition from empty to non-empty is irreversible for a given slot;
// the only way to create more empty slots is by rehashing.
val unsafe.Pointer
key *MutexClass
}
func ancestorsapmSlotAt(slots unsafe.Pointer, pos uintptr) *ancestorsapmSlot {
return (*ancestorsapmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(ancestorsapmSlot{})))
}
var ancestorstombstoneObj byte
func ancestorstombstone() unsafe.Pointer {
return unsafe.Pointer(&ancestorstombstoneObj)
}
var ancestorsevacuatedObj byte
func ancestorsevacuated() unsafe.Pointer {
return unsafe.Pointer(&ancestorsevacuatedObj)
}
// Load returns the Value stored in m for key.
func (m *ancestorsAtomicPtrMap) Load(key *MutexClass) *string {
hash := ancestorshasher.Hash(key)
shard := m.shard(hash)
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
return nil
}
i := hash & mask
inc := uintptr(1)
for {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
return nil
}
if slotVal == ancestorsevacuated() {
goto retry
}
if slot.key == key {
if slotVal == ancestorstombstone() {
return nil
}
return (*string)(slotVal)
}
i = (i + inc) & mask
inc++
}
}
// Store stores the Value val for key.
func (m *ancestorsAtomicPtrMap) Store(key *MutexClass, val *string) {
m.maybeCompareAndSwap(key, false, nil, val)
}
// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *ancestorsAtomicPtrMap) Swap(key *MutexClass, val *string) *string {
return m.maybeCompareAndSwap(key, false, nil, val)
}
// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *ancestorsAtomicPtrMap) CompareAndSwap(key *MutexClass, oldVal, newVal *string) *string {
return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}
func (m *ancestorsAtomicPtrMap) maybeCompareAndSwap(key *MutexClass, compare bool, typedOldVal, typedNewVal *string) *string {
hash := ancestorshasher.Hash(key)
shard := m.shard(hash)
oldVal := ancestorstombstone()
if typedOldVal != nil {
oldVal = unsafe.Pointer(typedOldVal)
}
newVal := ancestorstombstone()
if typedNewVal != nil {
newVal = unsafe.Pointer(typedNewVal)
}
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
if (compare && oldVal != ancestorstombstone()) || newVal == ancestorstombstone() {
return nil
}
shard.rehash(nil)
goto retry
}
i := hash & mask
inc := uintptr(1)
for {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
if (compare && oldVal != ancestorstombstone()) || newVal == ancestorstombstone() {
return nil
}
shard.dirtyMu.Lock()
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == nil {
if dirty, capacity := shard.dirty+1, mask+1; dirty*ancestorsapmRehashThresholdDen >= capacity*ancestorsapmRehashThresholdNum {
shard.dirtyMu.Unlock()
shard.rehash(slots)
goto retry
}
slot.key = key
atomic.StorePointer(&slot.val, newVal)
shard.dirty++
atomic.AddUintptr(&shard.count, 1)
shard.dirtyMu.Unlock()
return nil
}
shard.dirtyMu.Unlock()
}
if slotVal == ancestorsevacuated() {
goto retry
}
if slot.key == key {
for {
if (compare && oldVal != slotVal) || newVal == slotVal {
if slotVal == ancestorstombstone() {
return nil
}
return (*string)(slotVal)
}
if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
if slotVal == ancestorstombstone() {
atomic.AddUintptr(&shard.count, 1)
return nil
}
if newVal == ancestorstombstone() {
atomic.AddUintptr(&shard.count, ^uintptr(0))
}
return (*string)(slotVal)
}
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == ancestorsevacuated() {
goto retry
}
}
}
i = (i + inc) & mask
inc++
}
}
// rehash is marked nosplit to avoid preemption during table copying.
//
//go:nosplit
func (shard *ancestorsapmShard) rehash(oldSlots unsafe.Pointer) {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
if shard.slots != oldSlots {
return
}
newSize := uintptr(8)
if oldSlots != nil {
oldSize := shard.mask + 1
newSize = oldSize
if count := atomic.LoadUintptr(&shard.count) + 1; count*ancestorsapmExpansionThresholdDen > oldSize*ancestorsapmExpansionThresholdNum {
newSize *= 2
}
}
newSlotsSlice := make([]ancestorsapmSlot, newSize)
newSlots := unsafe.Pointer(&newSlotsSlice[0])
newMask := newSize - 1
shard.dirtyMu.Lock()
shard.seq.BeginWrite()
if oldSlots != nil {
realCount := uintptr(0)
oldMask := shard.mask
for i := uintptr(0); i <= oldMask; i++ {
oldSlot := ancestorsapmSlotAt(oldSlots, i)
val := atomic.SwapPointer(&oldSlot.val, ancestorsevacuated())
if val == nil || val == ancestorstombstone() {
continue
}
hash := ancestorshasher.Hash(oldSlot.key)
j := hash & newMask
inc := uintptr(1)
for {
newSlot := ancestorsapmSlotAt(newSlots, j)
if newSlot.val == nil {
newSlot.val = val
newSlot.key = oldSlot.key
break
}
j = (j + inc) & newMask
inc++
}
realCount++
}
shard.dirty = realCount
}
atomic.StorePointer(&shard.slots, newSlots)
atomic.StoreUintptr(&shard.mask, newMask)
shard.seq.EndWrite()
shard.dirtyMu.Unlock()
}
// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
func (m *ancestorsAtomicPtrMap) Range(f func(key *MutexClass, val *string) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
if !shard.doRange(f) {
return
}
}
}
func (shard *ancestorsapmShard) doRange(f func(key *MutexClass, val *string) bool) bool {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
slots := shard.slots
if slots == nil {
return true
}
mask := shard.mask
for i := uintptr(0); i <= mask; i++ {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil || slotVal == ancestorstombstone() {
continue
}
if !f(slot.key, (*string)(slotVal)) {
return false
}
}
return true
}
// RangeRepeatable is like Range, but:
//
// - RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// - It is safe for f to call other methods on m.
func (m *ancestorsAtomicPtrMap) RangeRepeatable(f func(key *MutexClass, val *string) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
continue
}
for i := uintptr(0); i <= mask; i++ {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == ancestorsevacuated() {
goto retry
}
if slotVal == nil || slotVal == ancestorstombstone() {
continue
}
if !f(slot.key, (*string)(slotVal)) {
return
}
}
}
}
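
To make the generated API above concrete, here is a minimal usage sketch written as if inside package locking; it is illustrative only and not taken from the commit, and the helper name and sample strings are made up.

// Illustrative only; the helper and values are hypothetical.
func exampleAncestorsMapUsage(a, b *MutexClass) {
	var m ancestorsAtomicPtrMap // the zero value is empty and ready for use

	// Store maps a key to a non-nil *string; Load returns it, or nil if the
	// key is unmapped.
	first := "stack recorded when the dependency was first seen"
	m.Store(a, &first)
	if s := m.Load(a); s != nil {
		_ = *s
	}

	// CompareAndSwap installs newVal only when the current value matches
	// oldVal, and always returns the previously stored value.
	second := "a different stack"
	_ = m.CompareAndSwap(a, &first, &second) // returns &first; the swap succeeds
	_ = m.CompareAndSwap(b, &first, &second) // returns nil; b stays unmapped

	// Storing nil deletes the mapping; Range skips deleted entries.
	m.Store(a, nil)
	m.Range(func(key *MutexClass, val *string) bool {
		return true // never invoked here: the map holds no live entries
	})
}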


@@ -0,0 +1,445 @@
package locking
import (
"sync/atomic"
"unsafe"
"gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/sync"
)
const (
// ShardOrder is an optional parameter specifying the base-2 log of the
// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
// unnecessary synchronization between unrelated concurrent operations,
// improving performance for write-heavy workloads, but increase memory
// usage for small maps.
goroutineLocksShardOrder = 0
)
// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
type goroutineLocksHasher struct {
goroutineLocksdefaultHasher
}
// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type goroutineLocksdefaultHasher struct {
fn func(unsafe.Pointer, uintptr) uintptr
seed uintptr
}
// Init initializes the Hasher.
func (h *goroutineLocksdefaultHasher) Init() {
h.fn = sync.MapKeyHasher(map[int64]*goroutineLocks(nil))
h.seed = sync.RandUintptr()
}
// Hash returns the hash value for the given Key.
func (h *goroutineLocksdefaultHasher) Hash(key int64) uintptr {
return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}
var goroutineLockshasher goroutineLocksHasher
func init() {
goroutineLockshasher.Init()
}
// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
type goroutineLocksAtomicPtrMap struct {
shards [1 << goroutineLocksShardOrder]goroutineLocksapmShard
}
func (m *goroutineLocksAtomicPtrMap) shard(hash uintptr) *goroutineLocksapmShard {
// Go defines right shifts >= width of shifted unsigned operand as 0, so
// this is correct even if ShardOrder is 0 (although nogo complains because
// nogo is dumb).
const indexLSB = unsafe.Sizeof(uintptr(0))*8 - goroutineLocksShardOrder
index := hash >> indexLSB
return (*goroutineLocksapmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(goroutineLocksapmShard{}))))
}
type goroutineLocksapmShard struct {
goroutineLocksapmShardMutationData
_ [goroutineLocksapmShardMutationDataPadding]byte
goroutineLocksapmShardLookupData
_ [goroutineLocksapmShardLookupDataPadding]byte
}
type goroutineLocksapmShardMutationData struct {
dirtyMu sync.Mutex // serializes slot transitions out of empty
dirty uintptr // # slots with val != nil
count uintptr // # slots with val != nil and val != tombstone()
rehashMu sync.Mutex // serializes rehashing
}
type goroutineLocksapmShardLookupData struct {
seq sync.SeqCount // allows atomic reads of slots+mask
slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
}
const (
goroutineLockscacheLineBytes = 64
// Cache line padding is enabled if sharding is.
goroutineLocksapmEnablePadding = (goroutineLocksShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
goroutineLocksapmShardMutationDataRequiredPadding = goroutineLockscacheLineBytes - (((unsafe.Sizeof(goroutineLocksapmShardMutationData{}) - 1) % goroutineLockscacheLineBytes) + 1)
goroutineLocksapmShardMutationDataPadding = goroutineLocksapmEnablePadding * goroutineLocksapmShardMutationDataRequiredPadding
goroutineLocksapmShardLookupDataRequiredPadding = goroutineLockscacheLineBytes - (((unsafe.Sizeof(goroutineLocksapmShardLookupData{}) - 1) % goroutineLockscacheLineBytes) + 1)
goroutineLocksapmShardLookupDataPadding = goroutineLocksapmEnablePadding * goroutineLocksapmShardLookupDataRequiredPadding
// These define fractional thresholds for when apmShard.rehash() is called
// (i.e. the load factor) and when it rehashes to a larger table
// respectively. They are chosen such that the rehash threshold = the
// expansion threshold + 1/2, so that when reuse of deleted slots is rare
// or non-existent, rehashing occurs after the insertion of at least 1/2
// the table's size in new entries, which is acceptably infrequent.
goroutineLocksapmRehashThresholdNum = 2
goroutineLocksapmRehashThresholdDen = 3
goroutineLocksapmExpansionThresholdNum = 1
goroutineLocksapmExpansionThresholdDen = 6
)
type goroutineLocksapmSlot struct {
// slot states are indicated by val:
//
// * Empty: val == nil; key is meaningless. May transition to full or
// evacuated with dirtyMu locked.
//
// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
// is the Value mapped to key. May transition to deleted or evacuated.
//
// * Deleted: val == tombstone(); key is still immutable. key is mapped to
// no Value. May transition to full or evacuated.
//
// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
// slots that have already been moved, requiring readers to wait for
// rehashing to complete and use the new table. Terminal state.
//
// Note that once val is non-nil, it cannot become nil again. That is, the
// transition from empty to non-empty is irreversible for a given slot;
// the only way to create more empty slots is by rehashing.
val unsafe.Pointer
key int64
}
func goroutineLocksapmSlotAt(slots unsafe.Pointer, pos uintptr) *goroutineLocksapmSlot {
return (*goroutineLocksapmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(goroutineLocksapmSlot{})))
}
var goroutineLockstombstoneObj byte
func goroutineLockstombstone() unsafe.Pointer {
return unsafe.Pointer(&goroutineLockstombstoneObj)
}
var goroutineLocksevacuatedObj byte
func goroutineLocksevacuated() unsafe.Pointer {
return unsafe.Pointer(&goroutineLocksevacuatedObj)
}
// Load returns the Value stored in m for key.
func (m *goroutineLocksAtomicPtrMap) Load(key int64) *goroutineLocks {
hash := goroutineLockshasher.Hash(key)
shard := m.shard(hash)
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
return nil
}
i := hash & mask
inc := uintptr(1)
for {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
return nil
}
if slotVal == goroutineLocksevacuated() {
goto retry
}
if slot.key == key {
if slotVal == goroutineLockstombstone() {
return nil
}
return (*goroutineLocks)(slotVal)
}
i = (i + inc) & mask
inc++
}
}
// Store stores the Value val for key.
func (m *goroutineLocksAtomicPtrMap) Store(key int64, val *goroutineLocks) {
m.maybeCompareAndSwap(key, false, nil, val)
}
// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *goroutineLocksAtomicPtrMap) Swap(key int64, val *goroutineLocks) *goroutineLocks {
return m.maybeCompareAndSwap(key, false, nil, val)
}
// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *goroutineLocksAtomicPtrMap) CompareAndSwap(key int64, oldVal, newVal *goroutineLocks) *goroutineLocks {
return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}
func (m *goroutineLocksAtomicPtrMap) maybeCompareAndSwap(key int64, compare bool, typedOldVal, typedNewVal *goroutineLocks) *goroutineLocks {
hash := goroutineLockshasher.Hash(key)
shard := m.shard(hash)
oldVal := goroutineLockstombstone()
if typedOldVal != nil {
oldVal = unsafe.Pointer(typedOldVal)
}
newVal := goroutineLockstombstone()
if typedNewVal != nil {
newVal = unsafe.Pointer(typedNewVal)
}
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
if (compare && oldVal != goroutineLockstombstone()) || newVal == goroutineLockstombstone() {
return nil
}
shard.rehash(nil)
goto retry
}
i := hash & mask
inc := uintptr(1)
for {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
if (compare && oldVal != goroutineLockstombstone()) || newVal == goroutineLockstombstone() {
return nil
}
shard.dirtyMu.Lock()
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == nil {
if dirty, capacity := shard.dirty+1, mask+1; dirty*goroutineLocksapmRehashThresholdDen >= capacity*goroutineLocksapmRehashThresholdNum {
shard.dirtyMu.Unlock()
shard.rehash(slots)
goto retry
}
slot.key = key
atomic.StorePointer(&slot.val, newVal)
shard.dirty++
atomic.AddUintptr(&shard.count, 1)
shard.dirtyMu.Unlock()
return nil
}
shard.dirtyMu.Unlock()
}
if slotVal == goroutineLocksevacuated() {
goto retry
}
if slot.key == key {
for {
if (compare && oldVal != slotVal) || newVal == slotVal {
if slotVal == goroutineLockstombstone() {
return nil
}
return (*goroutineLocks)(slotVal)
}
if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
if slotVal == goroutineLockstombstone() {
atomic.AddUintptr(&shard.count, 1)
return nil
}
if newVal == goroutineLockstombstone() {
atomic.AddUintptr(&shard.count, ^uintptr(0))
}
return (*goroutineLocks)(slotVal)
}
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == goroutineLocksevacuated() {
goto retry
}
}
}
i = (i + inc) & mask
inc++
}
}
// rehash is marked nosplit to avoid preemption during table copying.
//
//go:nosplit
func (shard *goroutineLocksapmShard) rehash(oldSlots unsafe.Pointer) {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
if shard.slots != oldSlots {
return
}
newSize := uintptr(8)
if oldSlots != nil {
oldSize := shard.mask + 1
newSize = oldSize
if count := atomic.LoadUintptr(&shard.count) + 1; count*goroutineLocksapmExpansionThresholdDen > oldSize*goroutineLocksapmExpansionThresholdNum {
newSize *= 2
}
}
newSlotsSlice := make([]goroutineLocksapmSlot, newSize)
newSlots := unsafe.Pointer(&newSlotsSlice[0])
newMask := newSize - 1
shard.dirtyMu.Lock()
shard.seq.BeginWrite()
if oldSlots != nil {
realCount := uintptr(0)
oldMask := shard.mask
for i := uintptr(0); i <= oldMask; i++ {
oldSlot := goroutineLocksapmSlotAt(oldSlots, i)
val := atomic.SwapPointer(&oldSlot.val, goroutineLocksevacuated())
if val == nil || val == goroutineLockstombstone() {
continue
}
hash := goroutineLockshasher.Hash(oldSlot.key)
j := hash & newMask
inc := uintptr(1)
for {
newSlot := goroutineLocksapmSlotAt(newSlots, j)
if newSlot.val == nil {
newSlot.val = val
newSlot.key = oldSlot.key
break
}
j = (j + inc) & newMask
inc++
}
realCount++
}
shard.dirty = realCount
}
atomic.StorePointer(&shard.slots, newSlots)
atomic.StoreUintptr(&shard.mask, newMask)
shard.seq.EndWrite()
shard.dirtyMu.Unlock()
}
// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
func (m *goroutineLocksAtomicPtrMap) Range(f func(key int64, val *goroutineLocks) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
if !shard.doRange(f) {
return
}
}
}
func (shard *goroutineLocksapmShard) doRange(f func(key int64, val *goroutineLocks) bool) bool {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
slots := shard.slots
if slots == nil {
return true
}
mask := shard.mask
for i := uintptr(0); i <= mask; i++ {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil || slotVal == goroutineLockstombstone() {
continue
}
if !f(slot.key, (*goroutineLocks)(slotVal)) {
return false
}
}
return true
}
// RangeRepeatable is like Range, but:
//
// - RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// - It is safe for f to call other methods on m.
func (m *goroutineLocksAtomicPtrMap) RangeRepeatable(f func(key int64, val *goroutineLocks) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
continue
}
for i := uintptr(0); i <= mask; i++ {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == goroutineLocksevacuated() {
goto retry
}
if slotVal == nil || slotVal == goroutineLockstombstone() {
continue
}
if !f(slot.key, (*goroutineLocks)(slotVal)) {
return
}
}
}
}
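
This file instantiates the same template keyed by goroutine ID rather than by mutex class. A minimal sketch of how such a map is driven follows; it is illustrative only, with a made-up goroutine ID and helper name.

// Illustrative only; the goroutine ID and helper are hypothetical.
func exampleRoutineLocksUsage(class *MutexClass) {
	var m goroutineLocksAtomicPtrMap

	// Record the set of lock classes held by (hypothetical) goroutine 42.
	held := goroutineLocks(make(map[*MutexClass]bool))
	held[class] = true
	m.Store(42, &held)

	// Later calls mutate the same set through the stored pointer.
	if l := m.Load(42); l != nil && (*l)[class] {
		delete(*l, class)
	}

	// Once the goroutine holds nothing, the entry is dropped by storing nil.
	m.Store(42, nil)
}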


@@ -0,0 +1,191 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build lockdep
// +build lockdep
package locking
import (
"fmt"
"reflect"
"strings"
"gvisor.dev/gvisor/pkg/goid"
"gvisor.dev/gvisor/pkg/log"
)
// NewMutexClass allocates a new mutex class.
func NewMutexClass(t reflect.Type, lockNames []string) *MutexClass {
c := &MutexClass{
typ: t,
nestedLockNames: lockNames,
nestedLockClasses: make([]*MutexClass, len(lockNames)),
}
for i := range lockNames {
c.nestedLockClasses[i] = NewMutexClass(t, nil)
c.nestedLockClasses[i].lockName = lockNames[i]
}
return c
}
// MutexClass describes dependencies of a specific class.
type MutexClass struct {
// The type of the mutex.
typ reflect.Type
// Name of the nested lock of the above type.
lockName string
// ancestors are locks that are locked before the current class.
ancestors ancestorsAtomicPtrMap
// nestedLockNames is a list of names for nested locks which are considered different
// instances of the same lock class.
nestedLockNames []string
// nestedLockClasses is a list of MutexClass instances of the same mutex class, but that are
// considered OK to lock simultaneously with each other, as well as with this mutex class.
// This is used for nested locking, where multiple instances of the same lock class are used
// simultaneously.
// Maps one-to-one with nestedLockNames.
nestedLockClasses []*MutexClass
}
func (m *MutexClass) String() string {
if m.lockName == "" {
return m.typ.String()
}
return fmt.Sprintf("%s[%s]", m.typ.String(), m.lockName)
}
type goroutineLocks map[*MutexClass]bool
var routineLocks goroutineLocksAtomicPtrMap
// maxChainLen is the maximum length of a lock chain.
const maxChainLen = 32
// checkLock checks that class isn't in the ancestors of prevClass.
func checkLock(class *MutexClass, prevClass *MutexClass, chain []*MutexClass) {
chain = append(chain, prevClass)
if len(chain) >= maxChainLen {
// This can happen due to a race with another goroutine that added
// a lock to the graph but hasn't completed the validation yet.
var b strings.Builder
fmt.Fprintf(&b, "WARNING: The maximum lock depth has been reached: %s", chain[0])
for i := 1; i < len(chain); i++ {
fmt.Fprintf(&b, "-> %s", chain[i])
}
log.Warningf("%s", b.String())
return
}
if c := prevClass.ancestors.Load(class); c != nil {
var b strings.Builder
fmt.Fprintf(&b, "WARNING: circular locking detected: %s -> %s:\n%s\n",
chain[0], class, log.LocalStack(3))
fmt.Fprintf(&b, "known lock chain: ")
c := class
for i := len(chain) - 1; i >= 0; i-- {
fmt.Fprintf(&b, "%s -> ", c)
c = chain[i]
}
fmt.Fprintf(&b, "%s\n", chain[0])
c = class
for i := len(chain) - 1; i >= 0; i-- {
fmt.Fprintf(&b, "\n====== %s -> %s =====\n%s",
c, chain[i], *chain[i].ancestors.Load(c))
c = chain[i]
}
panic(b.String())
}
prevClass.ancestors.RangeRepeatable(func(parentClass *MutexClass, stacks *string) bool {
// The recursion is fine here. If it fails, reduce the number of
// nested locks.
checkLock(class, parentClass, chain)
return true
})
}
// AddGLock records a lock for the current goroutine and updates dependencies.
func AddGLock(class *MutexClass, lockNameIndex int) {
gid := goid.Get()
if lockNameIndex != -1 {
class = class.nestedLockClasses[lockNameIndex]
}
currentLocks := routineLocks.Load(gid)
if currentLocks == nil {
locks := goroutineLocks(make(map[*MutexClass]bool))
locks[class] = true
routineLocks.Store(gid, &locks)
return
}
if (*currentLocks)[class] {
panic(fmt.Sprintf("nested locking: %s:\n%s", class, log.LocalStack(2)))
}
(*currentLocks)[class] = true
// Check dependencies and add locked mutexes to the ancestors list.
for prevClass := range *currentLocks {
if prevClass == class {
continue
}
checkLock(class, prevClass, nil)
if c := class.ancestors.Load(prevClass); c == nil {
stacks := string(log.LocalStack(2))
class.ancestors.Store(prevClass, &stacks)
}
}
}
// DelGLock deletes a lock from the current goroutine.
func DelGLock(class *MutexClass, lockNameIndex int) {
if lockNameIndex != -1 {
class = class.nestedLockClasses[lockNameIndex]
}
gid := goid.Get()
currentLocks := routineLocks.Load(gid)
if currentLocks == nil {
panic("the current goroutine doesn't have locks")
}
if _, ok := (*currentLocks)[class]; !ok {
var b strings.Builder
fmt.Fprintf(&b, "Lock not held: %s:\n", class)
fmt.Fprintf(&b, "Current stack:\n%s\n", string(log.LocalStack(2)))
fmt.Fprintf(&b, "Current locks:\n")
for c := range *currentLocks {
heldToClass := class.ancestors.Load(c)
classToHeld := c.ancestors.Load(class)
if heldToClass == nil && classToHeld == nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (no dependency to/from %s found)\n", c, class)
} else if heldToClass != nil && classToHeld != nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (mutual dependency with %s found, this should never happen)\n", c, class)
} else if heldToClass != nil && classToHeld == nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (dependency: %s -> %s)\n", c, c, class)
fmt.Fprintf(&b, "%s\n\n", *heldToClass)
} else if heldToClass == nil && classToHeld != nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (dependency: %s -> %s)\n", c, class, c)
fmt.Fprintf(&b, "%s\n\n", *classToHeld)
}
}
fmt.Fprintf(&b, "** End of locks held **\n")
panic(b.String())
}
delete(*currentLocks, class)
if len(*currentLocks) == 0 {
routineLocks.Store(gid, nil)
}
}
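
A hedged end-to-end sketch of how the tracker above catches a reversed lock order. The types, helper names, and call sites are hypothetical (callers would ordinarily go through lock wrappers rather than invoking AddGLock/DelGLock by hand), and the panic only fires when building with the lockdep tag.

// Illustrative only; the types and lock ordering are made up.
type exampleFooMutex struct{}
type exampleBarMutex struct{}

func exampleReversedOrder() {
	foo := NewMutexClass(reflect.TypeOf(exampleFooMutex{}), nil)
	bar := NewMutexClass(reflect.TypeOf(exampleBarMutex{}), nil)

	// foo is locked before bar, so AddGLock records foo as an ancestor of bar.
	AddGLock(foo, -1)
	AddGLock(bar, -1)
	DelGLock(bar, -1)
	DelGLock(foo, -1)

	// Taking the locks in the opposite order later finds foo among
	// bar's ancestors while bar is held, and the validator panics with the
	// recorded lock chain and stacks.
	AddGLock(bar, -1)
	AddGLock(foo, -1) // panics: "circular locking detected"
}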


@@ -0,0 +1,42 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !lockdep
// +build !lockdep
package locking
import (
"reflect"
)
type goroutineLocks map[*MutexClass]bool
// MutexClass is a stub class without the lockdep tag.
type MutexClass struct{}
// NewMutexClass is a no-op without the lockdep tag.
func NewMutexClass(reflect.Type, []string) *MutexClass {
return nil
}
// AddGLock is a no-op without the lockdep tag.
//
//go:inline
func AddGLock(*MutexClass, int) {}
// DelGLock is a no-op without the lockdep tag.
//
//go:inline
func DelGLock(*MutexClass, int) {}
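
These stubs let instrumented call sites compile identically with and without the lockdep tag. Below is a hypothetical wrapper of the rough shape such call sites take; the package, type name, and import path are assumptions and are not taken from this commit.

// Illustrative only; the package, type, and import path are assumptions.
package foopkg

import (
	"reflect"

	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/sync/locking"
)

// fooMutex wraps a mutex and reports lock ordering to the validator. The same
// source builds with and without the lockdep tag; without it, AddGLock and
// DelGLock compile to empty functions.
type fooMutex struct {
	mu sync.Mutex
}

var fooClass = locking.NewMutexClass(reflect.TypeOf(fooMutex{}), nil)

// Lock acquires the mutex and records it for the current goroutine.
func (m *fooMutex) Lock() {
	locking.AddGLock(fooClass, -1)
	m.mu.Lock()
}

// Unlock releases the mutex and removes it from the goroutine's held set.
func (m *fooMutex) Unlock() {
	m.mu.Unlock()
	locking.DelGLock(fooClass, -1)
}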


@@ -0,0 +1,28 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package locking implements lock primitives with the correctness validator.
//
// All mutexes are divided into classes, and the validator checks the following conditions:
// - Mutexes of the same class are not taken more than once, except in cases
//   where that is expected.
// - Mutexes are never locked in reverse order. Lock dependencies are tracked
//   on the class level.
//
// The validator is implemented in a straightforward way. For each mutex
// class, we maintain a set of ancestors: all classes that have ever been
// locked before it. For each goroutine, we track the set of currently held
// mutexes. Every lock method then checks that the ancestors of the currently
// held mutexes don't contain the class being locked.
package locking
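
Two short hypothetical sketches of the first rule, written as if inside this package and built with the lockdep tag; the type and lock names are made up and are not part of the commit.

// Illustrative only; the type and lock names are hypothetical.
type exampleMu struct{}

// Taking two locks of the same plain class in one goroutine trips the
// nested-locking check.
func exampleSameClassTwice() {
	plain := NewMutexClass(reflect.TypeOf(exampleMu{}), nil)
	AddGLock(plain, -1)
	AddGLock(plain, -1) // panics under the lockdep tag: "nested locking"
}

// Declaring nested lock names creates one sub-class per name, so holding a
// "parent" and a "child" instance together is allowed, and their relative
// order is tracked like any other pair of classes.
func exampleNestedNames() {
	nested := NewMutexClass(reflect.TypeOf(exampleMu{}), []string{"parent", "child"})
	AddGLock(nested, 0) // the "parent" instance
	AddGLock(nested, 1) // the "child" instance
	DelGLock(nested, 1)
	DelGLock(nested, 0)
}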