Update dependencies

This commit is contained in:
bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions


@@ -0,0 +1,36 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync
import (
"sync"
)
// Aliases of standard library types.
type (
// Cond is an alias of sync.Cond.
Cond = sync.Cond
// Locker is an alias of sync.Locker.
Locker = sync.Locker
// Once is an alias of sync.Once.
Once = sync.Once
// Pool is an alias of sync.Pool.
Pool = sync.Pool
// WaitGroup is an alias of sync.WaitGroup.
WaitGroup = sync.WaitGroup
// Map is an alias of sync.Map.
Map = sync.Map
)
// NewCond is a wrapper around sync.NewCond.
func NewCond(l Locker) *Cond {
return sync.NewCond(l)
}
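
Purely as an illustration (not part of the diff): because these are type aliases, callers can import this package in place of the standard library's sync and keep using the familiar types unchanged. The snippet below is a hypothetical caller.

package main

import "gvisor.dev/gvisor/pkg/sync"

func main() {
	var once sync.Once    // alias of the standard library's sync.Once
	var wg sync.WaitGroup // alias of the standard library's sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		once.Do(func() { /* one-time setup */ })
	}()
	wg.Wait()
}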


@@ -0,0 +1,19 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !checklocks
// +build !checklocks
package sync
import (
"unsafe"
)
func noteLock(l unsafe.Pointer) {
}
func noteUnlock(l unsafe.Pointer) {
}


@@ -0,0 +1,109 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build checklocks
// +build checklocks
package sync
import (
"fmt"
"strings"
"sync"
"unsafe"
"gvisor.dev/gvisor/pkg/goid"
)
// gLocks contains metadata about the locks held by a goroutine.
type gLocks struct {
locksHeld []unsafe.Pointer
}
// map[goid int]*gLocks
//
// Each key may only be written by the G with the goid it refers to.
//
// Note that entries are not evicted when a G exits, causing unbounded growth
// with new G creation / destruction. If this proves problematic, entries could
// be evicted when no locks are held at the expense of more allocations when
// taking top-level locks.
var locksHeld sync.Map
func getGLocks() *gLocks {
id := goid.Get()
var locks *gLocks
if l, ok := locksHeld.Load(id); ok {
locks = l.(*gLocks)
} else {
locks = &gLocks{
// Initialize space for a few locks.
locksHeld: make([]unsafe.Pointer, 0, 8),
}
locksHeld.Store(id, locks)
}
return locks
}
func noteLock(l unsafe.Pointer) {
locks := getGLocks()
for _, lock := range locks.locksHeld {
if lock == l {
panic(fmt.Sprintf("Deadlock on goroutine %d! Double lock of %p: %+v", goid.Get(), l, locks))
}
}
// Commit only after checking for panic conditions so that this lock
// isn't on the list if the above panic is recovered.
locks.locksHeld = append(locks.locksHeld, l)
}
func noteUnlock(l unsafe.Pointer) {
locks := getGLocks()
if len(locks.locksHeld) == 0 {
panic(fmt.Sprintf("Unlock of %p on goroutine %d without any locks held! All locks:\n%s", l, goid.Get(), dumpLocks()))
}
// Search backwards since callers are most likely to unlock in LIFO order.
length := len(locks.locksHeld)
for i := length - 1; i >= 0; i-- {
if l == locks.locksHeld[i] {
copy(locks.locksHeld[i:length-1], locks.locksHeld[i+1:length])
// Clear last entry to ensure addr can be GC'd.
locks.locksHeld[length-1] = nil
locks.locksHeld = locks.locksHeld[:length-1]
return
}
}
panic(fmt.Sprintf("Unlock of %p on goroutine %d without matching lock! All locks:\n%s", l, goid.Get(), dumpLocks()))
}
func dumpLocks() string {
var s strings.Builder
locksHeld.Range(func(key, value any) bool {
goid := key.(int64)
locks := value.(*gLocks)
// N.B. accessing gLocks of another G is fundamentally racy.
fmt.Fprintf(&s, "goroutine %d:\n", goid)
if len(locks.locksHeld) == 0 {
fmt.Fprintf(&s, "\t<none>\n")
}
for _, lock := range locks.locksHeld {
fmt.Fprintf(&s, "\t%p\n", lock)
}
fmt.Fprintf(&s, "\n")
return true
})
return s.String()
}
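
As an illustration of what this instrumentation buys (hypothetical program, not part of the diff): noteLock runs before the underlying mutex is acquired, so a same-goroutine double lock panics immediately with the goroutine id and lock address instead of silently deadlocking.

package main

import "gvisor.dev/gvisor/pkg/sync"

// Build with: go build -tags checklocks
// Without the tag the no-op noteLock is used and this program simply deadlocks.
func main() {
	var mu sync.Mutex
	mu.Lock()
	mu.Lock() // with checklocks: panic "Deadlock on goroutine N! Double lock of ..."
}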


@@ -0,0 +1,19 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
// MemoryFenceReads ensures that all preceding memory loads happen before
// following memory loads.
func MemoryFenceReads()
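
A sketch of the kind of code that needs a load-load fence (assumed usage, not taken from this diff): a seqcount-style reader has to keep its data read ordered between its two epoch reads. gVisor's SeqCount adds race-detector annotations on top of this pattern; they are omitted here.

package sync

import "sync/atomic"

// readStable is a hypothetical seqcount-style reader: it retries until two
// epoch reads agree, with MemoryFenceReads keeping the data load ordered
// strictly between them.
func readStable(epoch *uint32, data *uint64) uint64 {
	for {
		e1 := atomic.LoadUint32(epoch)
		MemoryFenceReads()
		v := *data // plain load; a real reader would add race annotations
		MemoryFenceReads()
		if e2 := atomic.LoadUint32(epoch); e1 == e2 && e1%2 == 0 {
			return v
		}
	}
}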


@@ -0,0 +1,26 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build amd64
// +build amd64
#include "textflag.h"
// func MemoryFenceReads()
TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0
// No memory fence is required on x86. However, a compiler fence is
// required to prevent the compiler from reordering memory accesses. The Go
// compiler will not reorder memory accesses around a call to an assembly
// function; compare runtime.publicationBarrier.
RET


@@ -0,0 +1,23 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build arm64
// +build arm64
#include "textflag.h"
// func MemoryFenceReads()
TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0
DMB $0x9 // ISHLD
RET


@@ -0,0 +1,151 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
import (
"fmt"
"math"
"sync/atomic"
"unsafe"
"gvisor.dev/gvisor/pkg/gohacks"
)
// Gate is a synchronization primitive that allows concurrent goroutines to
// "enter" it as long as it hasn't been closed yet. Once it's been closed,
// goroutines cannot enter it anymore, but are allowed to leave, and the closer
// will be informed when all goroutines have left.
//
// Gate is similar to WaitGroup:
//
// - Gate.Enter() is analogous to WaitGroup.Add(1), but may be called even if
// the Gate counter is 0 and fails if Gate.Close() has been called.
//
// - Gate.Leave() is equivalent to WaitGroup.Done().
//
// - Gate.Close() is analogous to WaitGroup.Wait(), but also causes future
// calls to Gate.Enter() to fail and may only be called once, from a single
// goroutine.
//
// This is useful, for example, in cases when a goroutine is trying to clean up
// an object for which multiple goroutines have pointers. In such a case, users
// would be required to enter and leave the Gate, and the cleaner would wait
// until all users are gone (and no new ones are allowed) before proceeding.
//
// Users:
//
// if !g.Enter() {
// // Gate is closed, we can't use the object.
// return
// }
//
// // Do something with object.
// [...]
//
// g.Leave()
//
// Closer:
//
// // Prevent new users from using the object, and wait for the existing
// // ones to complete.
// g.Close()
//
// // Clean up the object.
// [...]
type Gate struct {
userCount int32
closingG uintptr
}
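// preparingG is stored in Gate.closingG to indicate that the closing
// goroutine is preparing to block in Close but has not yet committed its g
// pointer (see gateCommit).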
const preparingG = 1
// Enter tries to enter the gate. It will succeed if it hasn't been closed yet,
// in which case the caller must eventually call Leave().
//
// This function is thread-safe.
func (g *Gate) Enter() bool {
if atomic.AddInt32(&g.userCount, 1) > 0 {
return true
}
g.leaveAfterFailedEnter()
return false
}
// leaveAfterFailedEnter is identical to Leave, but is marked noinline to
// prevent it from being inlined into Enter, since as of this writing inlining
// Leave into Enter prevents Enter from being inlined into its callers.
//
//go:noinline
func (g *Gate) leaveAfterFailedEnter() {
if atomic.AddInt32(&g.userCount, -1) == math.MinInt32 {
g.leaveClosed()
}
}
// Leave leaves the gate. This must only be called after a successful call to
// Enter(). If the gate has been closed and this is the last one inside the
// gate, it will notify the closer that the gate is done.
//
// This function is thread-safe.
func (g *Gate) Leave() {
if atomic.AddInt32(&g.userCount, -1) == math.MinInt32 {
g.leaveClosed()
}
}
func (g *Gate) leaveClosed() {
if atomic.LoadUintptr(&g.closingG) == 0 {
return
}
if g := atomic.SwapUintptr(&g.closingG, 0); g > preparingG {
goready(g, 0)
}
}
// Close closes the gate, causing future calls to Enter to fail, and waits
// until all goroutines that are currently inside the gate leave before
// returning.
//
// Only one goroutine can call this function.
func (g *Gate) Close() {
if atomic.LoadInt32(&g.userCount) == math.MinInt32 {
// The gate is already closed, with no goroutines inside. For legacy
// reasons, we have to allow Close to be called again in this case.
return
}
if v := atomic.AddInt32(&g.userCount, math.MinInt32); v == math.MinInt32 {
// userCount was already 0.
return
} else if v >= 0 {
panic("concurrent Close of sync.Gate")
}
if g := atomic.SwapUintptr(&g.closingG, preparingG); g != 0 {
panic(fmt.Sprintf("invalid sync.Gate.closingG during Close: %#x", g))
}
if atomic.LoadInt32(&g.userCount) == math.MinInt32 {
// The last call to Leave arrived while we were setting up closingG.
return
}
// WaitReasonSemacquire/TraceBlockSync are consistent with WaitGroup.
gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceBlockSync, 0)
}
//go:norace
//go:nosplit
func gateCommit(g uintptr, closingG unsafe.Pointer) bool {
return RaceUncheckedAtomicCompareAndSwapUintptr((*uintptr)(closingG), preparingG, g)
}
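
A compilable sketch of the user/closer pattern described in the comment above; the object type and its methods are illustrative, not part of the diff.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sync"
)

type object struct {
	gate sync.Gate
	// ... state that users read while inside the gate ...
}

// use is a hypothetical user: it may only touch the object while inside the gate.
func (o *object) use() {
	if !o.gate.Enter() {
		return // gate already closed; the object is being torn down
	}
	defer o.gate.Leave()
	// ... do something with the object ...
}

// destroy is the closer: once Close returns, no goroutine is inside the gate
// and no new one can enter.
func (o *object) destroy() {
	o.gate.Close()
	// ... safe to release the object's resources ...
}

func main() {
	o := &object{}
	done := make(chan struct{})
	go func() { o.use(); close(done) }()
	<-done
	o.destroy()
	fmt.Println("torn down")
}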


@@ -0,0 +1,18 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13 && !go1.14
// +build go1.13,!go1.14
package sync
import (
"runtime"
)
func goyield() {
// goyield is not available until Go 1.14.
runtime.Gosched()
}


@@ -0,0 +1,20 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.14
// +build go1.14
// //go:linkname directives type-checked by checklinkname. Any other
// non-linkname assumptions outside the Go 1 compatibility guarantee should
// have an accompanying vet check or a version-guard build tag.
package sync
import (
_ "unsafe" // for go:linkname
)
//go:linkname goyield runtime.goyield
func goyield()


@@ -0,0 +1,445 @@
package locking
import (
"sync/atomic"
"unsafe"
"gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/sync"
)
const (
// ShardOrder is an optional parameter specifying the base-2 log of the
// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
// unnecessary synchronization between unrelated concurrent operations,
// improving performance for write-heavy workloads, but increase memory
// usage for small maps.
ancestorsShardOrder = 0
)
// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
type ancestorsHasher struct {
ancestorsdefaultHasher
}
// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type ancestorsdefaultHasher struct {
fn func(unsafe.Pointer, uintptr) uintptr
seed uintptr
}
// Init initializes the Hasher.
func (h *ancestorsdefaultHasher) Init() {
h.fn = sync.MapKeyHasher(map[*MutexClass]*string(nil))
h.seed = sync.RandUintptr()
}
// Hash returns the hash value for the given Key.
func (h *ancestorsdefaultHasher) Hash(key *MutexClass) uintptr {
return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}
var ancestorshasher ancestorsHasher
func init() {
ancestorshasher.Init()
}
// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
type ancestorsAtomicPtrMap struct {
shards [1 << ancestorsShardOrder]ancestorsapmShard
}
func (m *ancestorsAtomicPtrMap) shard(hash uintptr) *ancestorsapmShard {
// Go defines right shifts >= width of shifted unsigned operand as 0, so
// this is correct even if ShardOrder is 0 (although nogo complains because
// nogo is dumb).
const indexLSB = unsafe.Sizeof(uintptr(0))*8 - ancestorsShardOrder
index := hash >> indexLSB
return (*ancestorsapmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(ancestorsapmShard{}))))
}
type ancestorsapmShard struct {
ancestorsapmShardMutationData
_ [ancestorsapmShardMutationDataPadding]byte
ancestorsapmShardLookupData
_ [ancestorsapmShardLookupDataPadding]byte
}
type ancestorsapmShardMutationData struct {
dirtyMu sync.Mutex // serializes slot transitions out of empty
dirty uintptr // # slots with val != nil
count uintptr // # slots with val != nil and val != tombstone()
rehashMu sync.Mutex // serializes rehashing
}
type ancestorsapmShardLookupData struct {
seq sync.SeqCount // allows atomic reads of slots+mask
slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
}
const (
ancestorscacheLineBytes = 64
// Cache line padding is enabled if sharding is.
ancestorsapmEnablePadding = (ancestorsShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
ancestorsapmShardMutationDataRequiredPadding = ancestorscacheLineBytes - (((unsafe.Sizeof(ancestorsapmShardMutationData{}) - 1) % ancestorscacheLineBytes) + 1)
ancestorsapmShardMutationDataPadding = ancestorsapmEnablePadding * ancestorsapmShardMutationDataRequiredPadding
ancestorsapmShardLookupDataRequiredPadding = ancestorscacheLineBytes - (((unsafe.Sizeof(ancestorsapmShardLookupData{}) - 1) % ancestorscacheLineBytes) + 1)
ancestorsapmShardLookupDataPadding = ancestorsapmEnablePadding * ancestorsapmShardLookupDataRequiredPadding
// These define fractional thresholds for when apmShard.rehash() is called
// (i.e. the load factor) and when it rehashes to a larger table
// respectively. They are chosen such that the rehash threshold = the
// expansion threshold + 1/2, so that when reuse of deleted slots is rare
// or non-existent, rehashing occurs after the insertion of at least 1/2
// the table's size in new entries, which is acceptably infrequent.
ancestorsapmRehashThresholdNum = 2
ancestorsapmRehashThresholdDen = 3
ancestorsapmExpansionThresholdNum = 1
ancestorsapmExpansionThresholdDen = 6
)
type ancestorsapmSlot struct {
// slot states are indicated by val:
//
// * Empty: val == nil; key is meaningless. May transition to full or
// evacuated with dirtyMu locked.
//
// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
// is the Value mapped to key. May transition to deleted or evacuated.
//
// * Deleted: val == tombstone(); key is still immutable. key is mapped to
// no Value. May transition to full or evacuated.
//
// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
// slots that have already been moved, requiring readers to wait for
// rehashing to complete and use the new table. Terminal state.
//
// Note that once val is non-nil, it cannot become nil again. That is, the
// transition from empty to non-empty is irreversible for a given slot;
// the only way to create more empty slots is by rehashing.
val unsafe.Pointer
key *MutexClass
}
func ancestorsapmSlotAt(slots unsafe.Pointer, pos uintptr) *ancestorsapmSlot {
return (*ancestorsapmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(ancestorsapmSlot{})))
}
var ancestorstombstoneObj byte
func ancestorstombstone() unsafe.Pointer {
return unsafe.Pointer(&ancestorstombstoneObj)
}
var ancestorsevacuatedObj byte
func ancestorsevacuated() unsafe.Pointer {
return unsafe.Pointer(&ancestorsevacuatedObj)
}
// Load returns the Value stored in m for key.
func (m *ancestorsAtomicPtrMap) Load(key *MutexClass) *string {
hash := ancestorshasher.Hash(key)
shard := m.shard(hash)
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
return nil
}
i := hash & mask
inc := uintptr(1)
for {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
return nil
}
if slotVal == ancestorsevacuated() {
goto retry
}
if slot.key == key {
if slotVal == ancestorstombstone() {
return nil
}
return (*string)(slotVal)
}
i = (i + inc) & mask
inc++
}
}
// Store stores the Value val for key.
func (m *ancestorsAtomicPtrMap) Store(key *MutexClass, val *string) {
m.maybeCompareAndSwap(key, false, nil, val)
}
// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *ancestorsAtomicPtrMap) Swap(key *MutexClass, val *string) *string {
return m.maybeCompareAndSwap(key, false, nil, val)
}
// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *ancestorsAtomicPtrMap) CompareAndSwap(key *MutexClass, oldVal, newVal *string) *string {
return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}
func (m *ancestorsAtomicPtrMap) maybeCompareAndSwap(key *MutexClass, compare bool, typedOldVal, typedNewVal *string) *string {
hash := ancestorshasher.Hash(key)
shard := m.shard(hash)
oldVal := ancestorstombstone()
if typedOldVal != nil {
oldVal = unsafe.Pointer(typedOldVal)
}
newVal := ancestorstombstone()
if typedNewVal != nil {
newVal = unsafe.Pointer(typedNewVal)
}
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
if (compare && oldVal != ancestorstombstone()) || newVal == ancestorstombstone() {
return nil
}
shard.rehash(nil)
goto retry
}
i := hash & mask
inc := uintptr(1)
for {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
if (compare && oldVal != ancestorstombstone()) || newVal == ancestorstombstone() {
return nil
}
shard.dirtyMu.Lock()
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == nil {
if dirty, capacity := shard.dirty+1, mask+1; dirty*ancestorsapmRehashThresholdDen >= capacity*ancestorsapmRehashThresholdNum {
shard.dirtyMu.Unlock()
shard.rehash(slots)
goto retry
}
slot.key = key
atomic.StorePointer(&slot.val, newVal)
shard.dirty++
atomic.AddUintptr(&shard.count, 1)
shard.dirtyMu.Unlock()
return nil
}
shard.dirtyMu.Unlock()
}
if slotVal == ancestorsevacuated() {
goto retry
}
if slot.key == key {
for {
if (compare && oldVal != slotVal) || newVal == slotVal {
if slotVal == ancestorstombstone() {
return nil
}
return (*string)(slotVal)
}
if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
if slotVal == ancestorstombstone() {
atomic.AddUintptr(&shard.count, 1)
return nil
}
if newVal == ancestorstombstone() {
atomic.AddUintptr(&shard.count, ^uintptr(0))
}
return (*string)(slotVal)
}
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == ancestorsevacuated() {
goto retry
}
}
}
i = (i + inc) & mask
inc++
}
}
// rehash is marked nosplit to avoid preemption during table copying.
//
//go:nosplit
func (shard *ancestorsapmShard) rehash(oldSlots unsafe.Pointer) {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
if shard.slots != oldSlots {
return
}
newSize := uintptr(8)
if oldSlots != nil {
oldSize := shard.mask + 1
newSize = oldSize
if count := atomic.LoadUintptr(&shard.count) + 1; count*ancestorsapmExpansionThresholdDen > oldSize*ancestorsapmExpansionThresholdNum {
newSize *= 2
}
}
newSlotsSlice := make([]ancestorsapmSlot, newSize)
newSlots := unsafe.Pointer(&newSlotsSlice[0])
newMask := newSize - 1
shard.dirtyMu.Lock()
shard.seq.BeginWrite()
if oldSlots != nil {
realCount := uintptr(0)
oldMask := shard.mask
for i := uintptr(0); i <= oldMask; i++ {
oldSlot := ancestorsapmSlotAt(oldSlots, i)
val := atomic.SwapPointer(&oldSlot.val, ancestorsevacuated())
if val == nil || val == ancestorstombstone() {
continue
}
hash := ancestorshasher.Hash(oldSlot.key)
j := hash & newMask
inc := uintptr(1)
for {
newSlot := ancestorsapmSlotAt(newSlots, j)
if newSlot.val == nil {
newSlot.val = val
newSlot.key = oldSlot.key
break
}
j = (j + inc) & newMask
inc++
}
realCount++
}
shard.dirty = realCount
}
atomic.StorePointer(&shard.slots, newSlots)
atomic.StoreUintptr(&shard.mask, newMask)
shard.seq.EndWrite()
shard.dirtyMu.Unlock()
}
// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
func (m *ancestorsAtomicPtrMap) Range(f func(key *MutexClass, val *string) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
if !shard.doRange(f) {
return
}
}
}
func (shard *ancestorsapmShard) doRange(f func(key *MutexClass, val *string) bool) bool {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
slots := shard.slots
if slots == nil {
return true
}
mask := shard.mask
for i := uintptr(0); i <= mask; i++ {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil || slotVal == ancestorstombstone() {
continue
}
if !f(slot.key, (*string)(slotVal)) {
return false
}
}
return true
}
// RangeRepeatable is like Range, but:
//
// - RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// - It is safe for f to call other methods on m.
func (m *ancestorsAtomicPtrMap) RangeRepeatable(f func(key *MutexClass, val *string) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
continue
}
for i := uintptr(0); i <= mask; i++ {
slot := ancestorsapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == ancestorsevacuated() {
goto retry
}
if slotVal == nil || slotVal == ancestorstombstone() {
continue
}
if !f(slot.key, (*string)(slotVal)) {
return
}
}
}
}
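
The generated map is unexported, so it can only be exercised from inside package locking. The helper below is a hypothetical illustration (not part of the diff) of the basic contract: Load returns nil for absent keys, and storing a nil Value deletes the entry (it becomes a tombstone).

package locking

// exampleAncestorsMap is a hypothetical illustration of the generated API.
func exampleAncestorsMap() {
	var m ancestorsAtomicPtrMap // the zero value is empty and ready for use
	var class MutexClass
	stack := "lock acquired at ..."

	m.Store(&class, &stack)
	if v := m.Load(&class); v != nil {
		_ = *v // "lock acquired at ..."
	}

	m.Store(&class, nil) // storing nil deletes the entry
	_ = m.Load(&class)   // nil again

	m.Range(func(k *MutexClass, v *string) bool {
		return true // keep iterating
	})
}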


@@ -0,0 +1,445 @@
package locking
import (
"sync/atomic"
"unsafe"
"gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/sync"
)
const (
// ShardOrder is an optional parameter specifying the base-2 log of the
// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
// unnecessary synchronization between unrelated concurrent operations,
// improving performance for write-heavy workloads, but increase memory
// usage for small maps.
goroutineLocksShardOrder = 0
)
// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
type goroutineLocksHasher struct {
goroutineLocksdefaultHasher
}
// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type goroutineLocksdefaultHasher struct {
fn func(unsafe.Pointer, uintptr) uintptr
seed uintptr
}
// Init initializes the Hasher.
func (h *goroutineLocksdefaultHasher) Init() {
h.fn = sync.MapKeyHasher(map[int64]*goroutineLocks(nil))
h.seed = sync.RandUintptr()
}
// Hash returns the hash value for the given Key.
func (h *goroutineLocksdefaultHasher) Hash(key int64) uintptr {
return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}
var goroutineLockshasher goroutineLocksHasher
func init() {
goroutineLockshasher.Init()
}
// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
type goroutineLocksAtomicPtrMap struct {
shards [1 << goroutineLocksShardOrder]goroutineLocksapmShard
}
func (m *goroutineLocksAtomicPtrMap) shard(hash uintptr) *goroutineLocksapmShard {
// Go defines right shifts >= width of shifted unsigned operand as 0, so
// this is correct even if ShardOrder is 0 (although nogo complains because
// nogo is dumb).
const indexLSB = unsafe.Sizeof(uintptr(0))*8 - goroutineLocksShardOrder
index := hash >> indexLSB
return (*goroutineLocksapmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(goroutineLocksapmShard{}))))
}
type goroutineLocksapmShard struct {
goroutineLocksapmShardMutationData
_ [goroutineLocksapmShardMutationDataPadding]byte
goroutineLocksapmShardLookupData
_ [goroutineLocksapmShardLookupDataPadding]byte
}
type goroutineLocksapmShardMutationData struct {
dirtyMu sync.Mutex // serializes slot transitions out of empty
dirty uintptr // # slots with val != nil
count uintptr // # slots with val != nil and val != tombstone()
rehashMu sync.Mutex // serializes rehashing
}
type goroutineLocksapmShardLookupData struct {
seq sync.SeqCount // allows atomic reads of slots+mask
slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
}
const (
goroutineLockscacheLineBytes = 64
// Cache line padding is enabled if sharding is.
goroutineLocksapmEnablePadding = (goroutineLocksShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
goroutineLocksapmShardMutationDataRequiredPadding = goroutineLockscacheLineBytes - (((unsafe.Sizeof(goroutineLocksapmShardMutationData{}) - 1) % goroutineLockscacheLineBytes) + 1)
goroutineLocksapmShardMutationDataPadding = goroutineLocksapmEnablePadding * goroutineLocksapmShardMutationDataRequiredPadding
goroutineLocksapmShardLookupDataRequiredPadding = goroutineLockscacheLineBytes - (((unsafe.Sizeof(goroutineLocksapmShardLookupData{}) - 1) % goroutineLockscacheLineBytes) + 1)
goroutineLocksapmShardLookupDataPadding = goroutineLocksapmEnablePadding * goroutineLocksapmShardLookupDataRequiredPadding
// These define fractional thresholds for when apmShard.rehash() is called
// (i.e. the load factor) and when it rehashes to a larger table
// respectively. They are chosen such that the rehash threshold = the
// expansion threshold + 1/2, so that when reuse of deleted slots is rare
// or non-existent, rehashing occurs after the insertion of at least 1/2
// the table's size in new entries, which is acceptably infrequent.
goroutineLocksapmRehashThresholdNum = 2
goroutineLocksapmRehashThresholdDen = 3
goroutineLocksapmExpansionThresholdNum = 1
goroutineLocksapmExpansionThresholdDen = 6
)
type goroutineLocksapmSlot struct {
// slot states are indicated by val:
//
// * Empty: val == nil; key is meaningless. May transition to full or
// evacuated with dirtyMu locked.
//
// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
// is the Value mapped to key. May transition to deleted or evacuated.
//
// * Deleted: val == tombstone(); key is still immutable. key is mapped to
// no Value. May transition to full or evacuated.
//
// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
// slots that have already been moved, requiring readers to wait for
// rehashing to complete and use the new table. Terminal state.
//
// Note that once val is non-nil, it cannot become nil again. That is, the
// transition from empty to non-empty is irreversible for a given slot;
// the only way to create more empty slots is by rehashing.
val unsafe.Pointer
key int64
}
func goroutineLocksapmSlotAt(slots unsafe.Pointer, pos uintptr) *goroutineLocksapmSlot {
return (*goroutineLocksapmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(goroutineLocksapmSlot{})))
}
var goroutineLockstombstoneObj byte
func goroutineLockstombstone() unsafe.Pointer {
return unsafe.Pointer(&goroutineLockstombstoneObj)
}
var goroutineLocksevacuatedObj byte
func goroutineLocksevacuated() unsafe.Pointer {
return unsafe.Pointer(&goroutineLocksevacuatedObj)
}
// Load returns the Value stored in m for key.
func (m *goroutineLocksAtomicPtrMap) Load(key int64) *goroutineLocks {
hash := goroutineLockshasher.Hash(key)
shard := m.shard(hash)
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
return nil
}
i := hash & mask
inc := uintptr(1)
for {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
return nil
}
if slotVal == goroutineLocksevacuated() {
goto retry
}
if slot.key == key {
if slotVal == goroutineLockstombstone() {
return nil
}
return (*goroutineLocks)(slotVal)
}
i = (i + inc) & mask
inc++
}
}
// Store stores the Value val for key.
func (m *goroutineLocksAtomicPtrMap) Store(key int64, val *goroutineLocks) {
m.maybeCompareAndSwap(key, false, nil, val)
}
// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *goroutineLocksAtomicPtrMap) Swap(key int64, val *goroutineLocks) *goroutineLocks {
return m.maybeCompareAndSwap(key, false, nil, val)
}
// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *goroutineLocksAtomicPtrMap) CompareAndSwap(key int64, oldVal, newVal *goroutineLocks) *goroutineLocks {
return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}
func (m *goroutineLocksAtomicPtrMap) maybeCompareAndSwap(key int64, compare bool, typedOldVal, typedNewVal *goroutineLocks) *goroutineLocks {
hash := goroutineLockshasher.Hash(key)
shard := m.shard(hash)
oldVal := goroutineLockstombstone()
if typedOldVal != nil {
oldVal = unsafe.Pointer(typedOldVal)
}
newVal := goroutineLockstombstone()
if typedNewVal != nil {
newVal = unsafe.Pointer(typedNewVal)
}
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
if (compare && oldVal != goroutineLockstombstone()) || newVal == goroutineLockstombstone() {
return nil
}
shard.rehash(nil)
goto retry
}
i := hash & mask
inc := uintptr(1)
for {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
if (compare && oldVal != goroutineLockstombstone()) || newVal == goroutineLockstombstone() {
return nil
}
shard.dirtyMu.Lock()
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == nil {
if dirty, capacity := shard.dirty+1, mask+1; dirty*goroutineLocksapmRehashThresholdDen >= capacity*goroutineLocksapmRehashThresholdNum {
shard.dirtyMu.Unlock()
shard.rehash(slots)
goto retry
}
slot.key = key
atomic.StorePointer(&slot.val, newVal)
shard.dirty++
atomic.AddUintptr(&shard.count, 1)
shard.dirtyMu.Unlock()
return nil
}
shard.dirtyMu.Unlock()
}
if slotVal == goroutineLocksevacuated() {
goto retry
}
if slot.key == key {
for {
if (compare && oldVal != slotVal) || newVal == slotVal {
if slotVal == goroutineLockstombstone() {
return nil
}
return (*goroutineLocks)(slotVal)
}
if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
if slotVal == goroutineLockstombstone() {
atomic.AddUintptr(&shard.count, 1)
return nil
}
if newVal == goroutineLockstombstone() {
atomic.AddUintptr(&shard.count, ^uintptr(0))
}
return (*goroutineLocks)(slotVal)
}
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == goroutineLocksevacuated() {
goto retry
}
}
}
i = (i + inc) & mask
inc++
}
}
// rehash is marked nosplit to avoid preemption during table copying.
//
//go:nosplit
func (shard *goroutineLocksapmShard) rehash(oldSlots unsafe.Pointer) {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
if shard.slots != oldSlots {
return
}
newSize := uintptr(8)
if oldSlots != nil {
oldSize := shard.mask + 1
newSize = oldSize
if count := atomic.LoadUintptr(&shard.count) + 1; count*goroutineLocksapmExpansionThresholdDen > oldSize*goroutineLocksapmExpansionThresholdNum {
newSize *= 2
}
}
newSlotsSlice := make([]goroutineLocksapmSlot, newSize)
newSlots := unsafe.Pointer(&newSlotsSlice[0])
newMask := newSize - 1
shard.dirtyMu.Lock()
shard.seq.BeginWrite()
if oldSlots != nil {
realCount := uintptr(0)
oldMask := shard.mask
for i := uintptr(0); i <= oldMask; i++ {
oldSlot := goroutineLocksapmSlotAt(oldSlots, i)
val := atomic.SwapPointer(&oldSlot.val, goroutineLocksevacuated())
if val == nil || val == goroutineLockstombstone() {
continue
}
hash := goroutineLockshasher.Hash(oldSlot.key)
j := hash & newMask
inc := uintptr(1)
for {
newSlot := goroutineLocksapmSlotAt(newSlots, j)
if newSlot.val == nil {
newSlot.val = val
newSlot.key = oldSlot.key
break
}
j = (j + inc) & newMask
inc++
}
realCount++
}
shard.dirty = realCount
}
atomic.StorePointer(&shard.slots, newSlots)
atomic.StoreUintptr(&shard.mask, newMask)
shard.seq.EndWrite()
shard.dirtyMu.Unlock()
}
// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
func (m *goroutineLocksAtomicPtrMap) Range(f func(key int64, val *goroutineLocks) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
if !shard.doRange(f) {
return
}
}
}
func (shard *goroutineLocksapmShard) doRange(f func(key int64, val *goroutineLocks) bool) bool {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
slots := shard.slots
if slots == nil {
return true
}
mask := shard.mask
for i := uintptr(0); i <= mask; i++ {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil || slotVal == goroutineLockstombstone() {
continue
}
if !f(slot.key, (*goroutineLocks)(slotVal)) {
return false
}
}
return true
}
// RangeRepeatable is like Range, but:
//
// - RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// - It is safe for f to call other methods on m.
func (m *goroutineLocksAtomicPtrMap) RangeRepeatable(f func(key int64, val *goroutineLocks) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
continue
}
for i := uintptr(0); i <= mask; i++ {
slot := goroutineLocksapmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == goroutineLocksevacuated() {
goto retry
}
if slotVal == nil || slotVal == goroutineLockstombstone() {
continue
}
if !f(slot.key, (*goroutineLocks)(slotVal)) {
return
}
}
}
}


@@ -0,0 +1,191 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build lockdep
// +build lockdep
package locking
import (
"fmt"
"reflect"
"strings"
"gvisor.dev/gvisor/pkg/goid"
"gvisor.dev/gvisor/pkg/log"
)
// NewMutexClass allocates a new mutex class.
func NewMutexClass(t reflect.Type, lockNames []string) *MutexClass {
c := &MutexClass{
typ: t,
nestedLockNames: lockNames,
nestedLockClasses: make([]*MutexClass, len(lockNames)),
}
for i := range lockNames {
c.nestedLockClasses[i] = NewMutexClass(t, nil)
c.nestedLockClasses[i].lockName = lockNames[i]
}
return c
}
// MutexClass describes dependencies of a specific class.
type MutexClass struct {
// The type of the mutex.
typ reflect.Type
// Name of the nested lock of the above type.
lockName string
// ancestors are locks that are locked before the current class.
ancestors ancestorsAtomicPtrMap
// nestedLockNames is a list of names for nested locks which are considered different instances
// of the same lock class.
nestedLockNames []string
// nestedLockClasses is a list of MutexClass instances of the same mutex class, but that are
// considered OK to lock simultaneously with each other, as well as with this mutex class.
// This is used for nested locking, where multiple instances of the same lock class are used
// simultaneously.
// Maps one-to-one with nestedLockNames.
nestedLockClasses []*MutexClass
}
func (m *MutexClass) String() string {
if m.lockName == "" {
return m.typ.String()
}
return fmt.Sprintf("%s[%s]", m.typ.String(), m.lockName)
}
type goroutineLocks map[*MutexClass]bool
var routineLocks goroutineLocksAtomicPtrMap
// maxChainLen is the maximum length of a lock chain.
const maxChainLen = 32
// checkLock checks that class isn't in the ancestors of prevClass.
func checkLock(class *MutexClass, prevClass *MutexClass, chain []*MutexClass) {
chain = append(chain, prevClass)
if len(chain) >= maxChainLen {
// This can be a race with another goroutine that added the lock to the
// graph but hasn't completed the validation yet.
var b strings.Builder
fmt.Fprintf(&b, "WARNING: The maximum lock depth has been reached: %s", chain[0])
for i := 1; i < len(chain); i++ {
fmt.Fprintf(&b, "-> %s", chain[i])
}
log.Warningf("%s", b.String())
return
}
if c := prevClass.ancestors.Load(class); c != nil {
var b strings.Builder
fmt.Fprintf(&b, "WARNING: circular locking detected: %s -> %s:\n%s\n",
chain[0], class, log.LocalStack(3))
fmt.Fprintf(&b, "known lock chain: ")
c := class
for i := len(chain) - 1; i >= 0; i-- {
fmt.Fprintf(&b, "%s -> ", c)
c = chain[i]
}
fmt.Fprintf(&b, "%s\n", chain[0])
c = class
for i := len(chain) - 1; i >= 0; i-- {
fmt.Fprintf(&b, "\n====== %s -> %s =====\n%s",
c, chain[i], *chain[i].ancestors.Load(c))
c = chain[i]
}
panic(b.String())
}
prevClass.ancestors.RangeRepeatable(func(parentClass *MutexClass, stacks *string) bool {
// The recursion is fine here. If it fails, you need to reduce
// the number of nested locks.
checkLock(class, parentClass, chain)
return true
})
}
// AddGLock records a lock to the current goroutine and updates dependencies.
func AddGLock(class *MutexClass, lockNameIndex int) {
gid := goid.Get()
if lockNameIndex != -1 {
class = class.nestedLockClasses[lockNameIndex]
}
currentLocks := routineLocks.Load(gid)
if currentLocks == nil {
locks := goroutineLocks(make(map[*MutexClass]bool))
locks[class] = true
routineLocks.Store(gid, &locks)
return
}
if (*currentLocks)[class] {
panic(fmt.Sprintf("nested locking: %s:\n%s", class, log.LocalStack(2)))
}
(*currentLocks)[class] = true
// Check dependencies and add locked mutexes to the ancestors list.
for prevClass := range *currentLocks {
if prevClass == class {
continue
}
checkLock(class, prevClass, nil)
if c := class.ancestors.Load(prevClass); c == nil {
stacks := string(log.LocalStack(2))
class.ancestors.Store(prevClass, &stacks)
}
}
}
// DelGLock deletes a lock from the current goroutine.
func DelGLock(class *MutexClass, lockNameIndex int) {
if lockNameIndex != -1 {
class = class.nestedLockClasses[lockNameIndex]
}
gid := goid.Get()
currentLocks := routineLocks.Load(gid)
if currentLocks == nil {
panic("the current goroutine doesn't have locks")
}
if _, ok := (*currentLocks)[class]; !ok {
var b strings.Builder
fmt.Fprintf(&b, "Lock not held: %s:\n", class)
fmt.Fprintf(&b, "Current stack:\n%s\n", string(log.LocalStack(2)))
fmt.Fprintf(&b, "Current locks:\n")
for c := range *currentLocks {
heldToClass := class.ancestors.Load(c)
classToHeld := c.ancestors.Load(class)
if heldToClass == nil && classToHeld == nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (no dependency to/from %s found)\n", c, class)
} else if heldToClass != nil && classToHeld != nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (mutual dependency with %s found, this should never happen)\n", c, class)
} else if heldToClass != nil && classToHeld == nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (dependency: %s -> %s)\n", c, c, class)
fmt.Fprintf(&b, "%s\n\n", *heldToClass)
} else if heldToClass == nil && classToHeld != nil {
fmt.Fprintf(&b, "\t- Holding lock: %s (dependency: %s -> %s)\n", c, class, c)
fmt.Fprintf(&b, "%s\n\n", *classToHeld)
}
}
fmt.Fprintf(&b, "** End of locks held **\n")
panic(b.String())
}
delete(*currentLocks, class)
if len(*currentLocks) == 0 {
routineLocks.Store(gid, nil)
}
}
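
In gVisor these entry points are driven by generated mutex wrappers; the hand-written sketch below shows that shape. All names here (fooMutex, fooClass) are illustrative assumptions, not from the diff.

package locking

import (
	"reflect"

	"gvisor.dev/gvisor/pkg/sync"
)

// fooMutex stands in for a generated wrapper around a real mutex.
type fooMutex struct {
	mu sync.Mutex
}

var fooClass = NewMutexClass(reflect.TypeOf(fooMutex{}), nil)

func (m *fooMutex) Lock() {
	AddGLock(fooClass, -1) // record the class for this goroutine and check ordering
	m.mu.Lock()
}

func (m *fooMutex) Unlock() {
	m.mu.Unlock()
	DelGLock(fooClass, -1) // forget the class once the lock is released
}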


@@ -0,0 +1,42 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !lockdep
// +build !lockdep
package locking
import (
"reflect"
)
type goroutineLocks map[*MutexClass]bool
// MutexClass is a stub class without the lockdep tag.
type MutexClass struct{}
// NewMutexClass is a no-op without the lockdep tag.
func NewMutexClass(reflect.Type, []string) *MutexClass {
return nil
}
// AddGLock is a no-op without the lockdep tag.
//
//go:inline
func AddGLock(*MutexClass, int) {}
// DelGLock is a no-op without the lockdep tag.
//
//go:inline
func DelGLock(*MutexClass, int) {}


@@ -0,0 +1,28 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package locking implements lock primitives with the correctness validator.
//
// All mutexes are divided into classes, and the validator checks the following
// conditions:
// - Mutexes of the same class are not taken more than once, except in cases
// where that is expected.
// - Mutexes are never locked in reverse order. Lock dependencies are tracked
// at the class level.
//
// The validator is implemented in a very straightforward way. For each mutex
// class, we maintain the ancestors list of all classes that have ever been
// taken before the target one. For each goroutine, we have the list of
// currently locked mutexes. And finally, all lock methods check that
// ancestors of currently locked mutexes don't contain the target one.
package locking
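
A concrete illustration of the inversion the validator reports, using the exported entry points directly; lockA, lockB, and inversionSketch are hypothetical names, not part of the diff. Without the lockdep tag all of these calls are no-ops.

package locking

import "reflect"

type lockA struct{}
type lockB struct{}

var (
	classA = NewMutexClass(reflect.TypeOf(lockA{}), nil)
	classB = NewMutexClass(reflect.TypeOf(lockB{}), nil)
)

// inversionSketch shows the call sequence that trips the validator when built
// with the lockdep tag: taking A then B records classA as an ancestor of
// classB, so a later B-then-A acquisition finds classA among classB's
// ancestors and panics with "circular locking detected".
func inversionSketch() {
	AddGLock(classA, -1)
	AddGLock(classB, -1)
	DelGLock(classB, -1)
	DelGLock(classA, -1)

	AddGLock(classB, -1)
	AddGLock(classA, -1) // with lockdep: panic
}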


@@ -0,0 +1,79 @@
// Copyright 2019 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync
import (
"sync"
"unsafe"
)
// CrossGoroutineMutex is equivalent to Mutex, but it need not be unlocked by
// the same goroutine that locked the mutex.
type CrossGoroutineMutex struct {
m sync.Mutex
}
// Lock locks the underlying Mutex.
// +checklocksignore
func (m *CrossGoroutineMutex) Lock() {
m.m.Lock()
}
// Unlock unlocks the underlying Mutex.
// +checklocksignore
func (m *CrossGoroutineMutex) Unlock() {
m.m.Unlock()
}
// TryLock tries to acquire the mutex. It returns true if it succeeds and false
// otherwise. TryLock does not block.
func (m *CrossGoroutineMutex) TryLock() bool {
return m.m.TryLock()
}
// Mutex is a mutual exclusion lock. The zero value for a Mutex is an unlocked
// mutex.
//
// A Mutex must not be copied after first use.
//
// A Mutex must be unlocked by the same goroutine that locked it. This
// invariant is enforced with the 'checklocks' build tag.
type Mutex struct {
m CrossGoroutineMutex
}
// Lock locks m. If the lock is already in use, the calling goroutine blocks
// until the mutex is available.
// +checklocksignore
func (m *Mutex) Lock() {
noteLock(unsafe.Pointer(m))
m.m.Lock()
}
// Unlock unlocks m.
//
// Preconditions:
// - m is locked.
// - m was locked by this goroutine.
//
// +checklocksignore
func (m *Mutex) Unlock() {
noteUnlock(unsafe.Pointer(m))
m.m.Unlock()
}
// TryLock tries to acquire the mutex. It returns true if it succeeds and false
// otherwise. TryLock does not block.
// +checklocksignore
func (m *Mutex) TryLock() bool {
// Note lock first to enforce proper locking even if unsuccessful.
noteLock(unsafe.Pointer(m))
locked := m.m.TryLock()
if !locked {
noteUnlock(unsafe.Pointer(m))
}
return locked
}
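
Illustrative only: TryLock lets a caller skip work under contention instead of blocking. The helper below is a hypothetical caller inside this package, not part of the diff.

package sync

// tryFlushSketch flushes only if mu is immediately available; otherwise it
// reports false so the caller can retry later without blocking.
func tryFlushSketch(mu *Mutex, flush func()) bool {
	if !mu.TryLock() {
		return false
	}
	defer mu.Unlock()
	flush()
	return true
}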


@@ -0,0 +1,28 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
// NoCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
type NoCopy struct{}
// Lock is a no-op used by -copylocks checker from `go vet`.
func (*NoCopy) Lock() {}
// Unlock is a no-op used by -copylocks checker from `go vet`.
func (*NoCopy) Unlock() {}
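
Illustrative only (hypothetical struct, not part of the diff): embedding NoCopy gives go vet's copylocks check something to flag when a value of the containing type is copied.

package sync

// bufferSketch must not be copied after first use; the NoCopy field makes
// `go vet` report assignments or by-value passing of bufferSketch values.
type bufferSketch struct {
	noCopy NoCopy
	data   []byte
}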


@@ -0,0 +1,47 @@
// Copyright 2019 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !race
// +build !race
package sync
import (
"sync/atomic"
"unsafe"
)
// RaceEnabled is true if the Go data race detector is enabled.
const RaceEnabled = false
// RaceDisable has the same semantics as runtime.RaceDisable.
func RaceDisable() {
}
// RaceEnable has the same semantics as runtime.RaceEnable.
func RaceEnable() {
}
// RaceAcquire has the same semantics as runtime.RaceAcquire.
func RaceAcquire(addr unsafe.Pointer) {
}
// RaceRelease has the same semantics as runtime.RaceRelease.
func RaceRelease(addr unsafe.Pointer) {
}
// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge.
func RaceReleaseMerge(addr unsafe.Pointer) {
}
// RaceUncheckedAtomicCompareAndSwapUintptr is equivalent to
// sync/atomic.CompareAndSwapUintptr, but is not checked by the race detector.
// This is necessary when implementing gopark callbacks, since no race context
// is available during their execution.
func RaceUncheckedAtomicCompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool {
// Use atomic.CompareAndSwapUintptr outside of race builds for
// inlinability.
return atomic.CompareAndSwapUintptr(ptr, old, new)
}


@@ -0,0 +1,33 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build race && amd64
// +build race,amd64
#include "textflag.h"
// func RaceUncheckedAtomicCompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
TEXT ·RaceUncheckedAtomicCompareAndSwapUintptr(SB),NOSPLIT|NOFRAME,$0-25
MOVQ ptr+0(FP), DI
MOVQ old+8(FP), AX
MOVQ new+16(FP), SI
LOCK
CMPXCHGQ SI, 0(DI)
SETEQ AX
MOVB AX, ret+24(FP)
RET


@@ -0,0 +1,35 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build race && arm64
// +build race,arm64
#include "textflag.h"
// func RaceUncheckedAtomicCompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
TEXT ·RaceUncheckedAtomicCompareAndSwapUintptr(SB),NOSPLIT,$0-25
MOVD ptr+0(FP), R0
MOVD old+8(FP), R1
MOVD new+16(FP), R2
again:
LDAXR (R0), R3
CMP R1, R3
BNE ok
STLXR R2, (R0), R3
CBNZ R3, again
ok:
CSET EQ, R0
MOVB R0, ret+24(FP)
RET


@@ -0,0 +1,48 @@
// Copyright 2019 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build race
// +build race
package sync
import (
"runtime"
"unsafe"
)
// RaceEnabled is true if the Go data race detector is enabled.
const RaceEnabled = true
// RaceDisable has the same semantics as runtime.RaceDisable.
func RaceDisable() {
runtime.RaceDisable()
}
// RaceEnable has the same semantics as runtime.RaceEnable.
func RaceEnable() {
runtime.RaceEnable()
}
// RaceAcquire has the same semantics as runtime.RaceAcquire.
func RaceAcquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
// RaceRelease has the same semantics as runtime.RaceRelease.
func RaceRelease(addr unsafe.Pointer) {
runtime.RaceRelease(addr)
}
// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge.
func RaceReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
// RaceUncheckedAtomicCompareAndSwapUintptr is equivalent to
// sync/atomic.CompareAndSwapUintptr, but is not checked by the race detector.
// This is necessary when implementing gopark callbacks, since no race context
// is available during their execution.
func RaceUncheckedAtomicCompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool


@@ -0,0 +1,22 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
import (
"runtime"
)
// Dummy reference for facts.
const _ = runtime.Compiler


@@ -0,0 +1,30 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64
package sync
import (
"sync/atomic"
)
const supportsWakeSuppression = true
// addrOfSpinning returns the address of runtime.sched.nmspinning.
func addrOfSpinning() *int32
// nmspinning caches the address returned by addrOfSpinning.
var nmspinning = addrOfSpinning()
//go:nosplit
func preGoReadyWakeSuppression() {
atomic.AddInt32(nmspinning, 1)
}
//go:nosplit
func postGoReadyWakeSuppression() {
atomic.AddInt32(nmspinning, -1)
}


@@ -0,0 +1,22 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
// Values for the reason argument to gopark, from Go's src/runtime/runtime2.go.
const (
WaitReasonSelect uint8 = 9 // +checkconst runtime waitReasonSelect
WaitReasonChanReceive uint8 = 14 // +checkconst runtime waitReasonChanReceive
WaitReasonSemacquire uint8 = 18 // +checkconst runtime waitReasonSemacquire
)


@@ -0,0 +1,21 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
// TraceBlockReason constants, from Go's src/runtime/trace2runtime.go.
const (
TraceBlockSelect TraceBlockReason = 3 // +checkconst runtime traceBlockSelect
TraceBlockSync TraceBlockReason = 5 // +checkconst runtime traceBlockSync
)


@@ -0,0 +1,16 @@
// Copyright 2023 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package sync
import (
"unsafe"
)
// Use checkoffset to assert that maptype.hasher (the only field we use) has
// the correct offset.
const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset internal/abi MapType.Hasher


@@ -0,0 +1,18 @@
// Copyright 2023 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// runtime.maptype is moved to internal/abi.MapType in Go 1.21.
//
//go:build !go1.21

package sync
import (
"unsafe"
)
// Use checkoffset to assert that maptype.hasher (the only field we use) has
// the correct offset.
const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset runtime maptype.hasher

View File

@@ -0,0 +1,14 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64
// +build !amd64

package sync
const supportsWakeSuppression = false
func preGoReadyWakeSuppression() {} // Never called.
func postGoReadyWakeSuppression() {} // Never called.

View File

@@ -0,0 +1,25 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build amd64

#include "textflag.h"
#define NMSPINNING_OFFSET 92 // +checkoffset runtime schedt.nmspinning
TEXT ·addrOfSpinning(SB),NOSPLIT|NOFRAME,$0-8
LEAQ runtime·sched(SB), AX
ADDQ $NMSPINNING_OFFSET, AX
MOVQ AX, ret+0(FP)
RET

View File

@@ -0,0 +1,18 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !amd64
// This file is intentionally left blank. Other arches don't use
// addrOfSpinning, but we still need an input to the nogo template rule.

View File

@@ -0,0 +1,140 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// //go:linkname directives type-checked by checklinkname.
// Runtime type copies checked by checkoffset.
package sync
import (
"fmt"
"reflect"
"unsafe"
)
// Goyield is runtime.goyield, which is similar to runtime.Gosched but only
// yields the processor to other goroutines already on the processor's
// runqueue.
//
//go:nosplit
func Goyield() {
goyield()
}
// Gopark is runtime.gopark. Gopark calls unlockf(pointer to runtime.g, lock);
// if unlockf returns true, Gopark blocks until Goready(pointer to runtime.g)
// is called. unlockf and its callees must be nosplit and norace, since stack
// splitting and race context are not available where it is called.
//
//go:nosplit
func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int) {
gopark(unlockf, lock, reason, traceReason, traceskip)
}
//go:linkname gopark runtime.gopark
func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int)
// TraceBlockReason is equivalent to runtime.traceBlockReason.
type TraceBlockReason uint8
//go:linkname wakep runtime.wakep
func wakep()
// Wakep is runtime.wakep.
//
//go:nosplit
func Wakep() {
// This is only supported if we can suppress the wakep called
// from Goready below, which is only possible on certain architectures.
if supportsWakeSuppression {
wakep()
}
}
//go:linkname goready runtime.goready
func goready(gp uintptr, traceskip int)
// Goready is runtime.goready.
//
// The additional wakep argument controls whether a new thread will be kicked to
// execute the P. This should be true in most circumstances. However, if the
// current thread is about to sleep, then this can be false for efficiency.
//
//go:nosplit
func Goready(gp uintptr, traceskip int, wakep bool) {
if supportsWakeSuppression && !wakep {
preGoReadyWakeSuppression()
}
goready(gp, traceskip)
if supportsWakeSuppression && !wakep {
postGoReadyWakeSuppression()
}
}
// Rand32 returns a non-cryptographically-secure random uint32.
func Rand32() uint32 {
return fastrand()
}
// Rand64 returns a non-cryptographically-secure random uint64.
func Rand64() uint64 {
return uint64(fastrand())<<32 | uint64(fastrand())
}
//go:linkname fastrand runtime.fastrand
func fastrand() uint32
// RandUintptr returns a non-cryptographically-secure random uintptr.
func RandUintptr() uintptr {
if unsafe.Sizeof(uintptr(0)) == 4 {
return uintptr(Rand32())
}
return uintptr(Rand64())
}
// MapKeyHasher returns a hash function for pointers of m's key type.
//
// Preconditions: m must be a map.
func MapKeyHasher(m any) func(unsafe.Pointer, uintptr) uintptr {
if rtyp := reflect.TypeOf(m); rtyp.Kind() != reflect.Map {
panic(fmt.Sprintf("sync.MapKeyHasher: m is %v, not map", rtyp))
}
mtyp := *(**maptype)(unsafe.Pointer(&m))
return mtyp.Hasher
}
// maptype is equivalent to the beginning of internal/abi.MapType.
type maptype struct {
size uintptr
ptrdata uintptr
hash uint32
tflag uint8
align uint8
fieldAlign uint8
kind uint8
equal func(unsafe.Pointer, unsafe.Pointer) bool
gcdata *byte
str int32
ptrToThis int32
key unsafe.Pointer
elem unsafe.Pointer
bucket unsafe.Pointer
Hasher func(unsafe.Pointer, uintptr) uintptr
// more fields
}
// These functions are only used within the sync package.
//go:linkname semacquire sync.runtime_Semacquire
func semacquire(addr *uint32)
//go:linkname semrelease sync.runtime_Semrelease
func semrelease(addr *uint32, handoff bool, skipframes int)
//go:linkname canSpin sync.runtime_canSpin
func canSpin(i int) bool
//go:linkname doSpin sync.runtime_doSpin
func doSpin()
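
As a usage note for MapKeyHasher and RandUintptr above, here is a hedged sketch of hashing keys outside of a Go map, for example to pick a shard; the string key type, the seed handling, and the shard count are assumptions made for the example.

package main

import (
	"fmt"
	"unsafe"

	"gvisor.dev/gvisor/pkg/sync"
)

func main() {
	// Obtain the runtime's hash function for string keys by passing a map
	// value with the desired key type.
	hasher := sync.MapKeyHasher(map[string]struct{}{})

	// Use a random per-table seed, mirroring how Go maps resist hash
	// flooding.
	seed := sync.RandUintptr()

	const numShards = 16
	for _, key := range []string{"alpha", "beta", "gamma"} {
		k := key // hash the address of a local copy
		h := hasher(unsafe.Pointer(&k), seed)
		fmt.Printf("%q -> shard %d\n", key, h%numShards)
	}
}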

View File

@@ -0,0 +1,314 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2019 The gVisor Authors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is mostly copied from the standard library's sync/rwmutex.go.
//
// Happens-before relationships indicated to the race detector:
// - Unlock -> Lock (via writerSem)
// - Unlock -> RLock (via readerSem)
// - RUnlock -> Lock (via writerSem)
// - DowngradeLock -> RLock (via readerSem)
package sync
import (
"sync/atomic"
"unsafe"
)
// CrossGoroutineRWMutex is equivalent to RWMutex, but it need not be unlocked
// by the same goroutine that locked the mutex.
type CrossGoroutineRWMutex struct {
// w is held if there are pending writers
//
// We use CrossGoroutineMutex rather than Mutex because the lock
// annotation instrumentation in Mutex will trigger false positives in
// the race detector when called inside of RaceDisable.
w CrossGoroutineMutex
writerSem uint32 // semaphore for writers to wait for completing readers
readerSem uint32 // semaphore for readers to wait for completing writers
readerCount int32 // number of pending readers
readerWait int32 // number of departing readers
}
const rwmutexMaxReaders = 1 << 30
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryRLock() bool {
if RaceEnabled {
RaceDisable()
}
for {
rc := atomic.LoadInt32(&rw.readerCount)
if rc < 0 {
if RaceEnabled {
RaceEnable()
}
return false
}
if !atomic.CompareAndSwapInt32(&rw.readerCount, rc, rc+1) {
continue
}
if RaceEnabled {
RaceEnable()
RaceAcquire(unsafe.Pointer(&rw.readerSem))
}
return true
}
}
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RLock() {
if RaceEnabled {
RaceDisable()
}
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
semacquire(&rw.readerSem)
}
if RaceEnabled {
RaceEnable()
RaceAcquire(unsafe.Pointer(&rw.readerSem))
}
}
// RUnlock undoes a single RLock call.
//
// Preconditions:
// - rw is locked for reading.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RUnlock() {
if RaceEnabled {
RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
RaceDisable()
}
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
panic("RUnlock of unlocked RWMutex")
}
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
// The last reader unblocks the writer.
semrelease(&rw.writerSem, false, 0)
}
}
if RaceEnabled {
RaceEnable()
}
}
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
if RaceEnabled {
RaceDisable()
}
// First, resolve competition with other writers.
if !rw.w.TryLock() {
if RaceEnabled {
RaceEnable()
}
return false
}
// Only proceed if there are no readers.
if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
rw.w.Unlock()
if RaceEnabled {
RaceEnable()
}
return false
}
if RaceEnabled {
RaceEnable()
RaceAcquire(unsafe.Pointer(&rw.writerSem))
}
return true
}
// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Lock() {
if RaceEnabled {
RaceDisable()
}
// First, resolve competition with other writers.
rw.w.Lock()
// Announce to readers there is a pending writer.
r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
// Wait for active readers.
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
semacquire(&rw.writerSem)
}
if RaceEnabled {
RaceEnable()
RaceAcquire(unsafe.Pointer(&rw.writerSem))
}
}
// Unlock unlocks rw for writing.
//
// Preconditions:
// - rw is locked for writing.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.writerSem))
RaceRelease(unsafe.Pointer(&rw.readerSem))
RaceDisable()
}
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
panic("Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
semrelease(&rw.readerSem, false, 0)
}
// Allow other writers to proceed.
rw.w.Unlock()
if RaceEnabled {
RaceEnable()
}
}
// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
// - rw is locked for writing.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.readerSem))
RaceDisable()
}
// Announce to readers there is no active writer and one additional reader.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
if r >= rwmutexMaxReaders+1 {
panic("DowngradeLock of unlocked RWMutex")
}
// Unblock blocked readers, if any. Note that this loop starts at 1 since r
// includes this goroutine.
for i := 1; i < int(r); i++ {
semrelease(&rw.readerSem, false, 0)
}
// Allow other writers to proceed to rw.w.Lock(). Note that they will still
// block on rw.writerSem since at least this reader exists, such that
// DowngradeLock() is atomic with the previous write lock.
rw.w.Unlock()
if RaceEnabled {
RaceEnable()
}
}
// A RWMutex is a reader/writer mutual exclusion lock. The lock can be held by
// an arbitrary number of readers or a single writer. The zero value for a
// RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might call
// Lock, no goroutine should expect to be able to acquire a read lock until the
// initial read lock is released. In particular, this prohibits recursive read
// locking. This is to ensure that the lock eventually becomes available; a
// blocked Lock call excludes new readers from acquiring the lock.
//
// A RWMutex must be unlocked by the same goroutine that locked it. This
// invariant is enforced with the 'checklocks' build tag.
type RWMutex struct {
m CrossGoroutineRWMutex
}
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryRLock() bool {
// Note lock first to enforce proper locking even if unsuccessful.
noteLock(unsafe.Pointer(rw))
locked := rw.m.TryRLock()
if !locked {
noteUnlock(unsafe.Pointer(rw))
}
return locked
}
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *RWMutex) RLock() {
noteLock(unsafe.Pointer(rw))
rw.m.RLock()
}
// RUnlock undoes a single RLock call.
//
// Preconditions:
// - rw is locked for reading.
// - rw was locked by this goroutine.
//
// +checklocksignore
func (rw *RWMutex) RUnlock() {
rw.m.RUnlock()
noteUnlock(unsafe.Pointer(rw))
}
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryLock() bool {
// Note lock first to enforce proper locking even if unsuccessful.
noteLock(unsafe.Pointer(rw))
locked := rw.m.TryLock()
if !locked {
noteUnlock(unsafe.Pointer(rw))
}
return locked
}
// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *RWMutex) Lock() {
noteLock(unsafe.Pointer(rw))
rw.m.Lock()
}
// Unlock unlocks rw for writing.
//
// Preconditions:
// - rw is locked for writing.
// - rw was locked by this goroutine.
//
// +checklocksignore
func (rw *RWMutex) Unlock() {
rw.m.Unlock()
noteUnlock(unsafe.Pointer(rw))
}
// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
// - rw is locked for writing.
//
// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
// No note change for DowngradeLock.
rw.m.DowngradeLock()
}
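
To illustrate the two wrappers above, here is a hedged sketch; the stats type and the channel handoff are assumptions made for the example. The first function shows DowngradeLock, which atomically trades the write lock for a read lock so a writer can keep reading what it just published; the second shows the cross-goroutine unlock that CrossGoroutineRWMutex allows but RWMutex (under the checklocks build tag) forbids.

package example

import (
	"gvisor.dev/gvisor/pkg/sync"
)

// stats is a hypothetical value protected by an RWMutex.
type stats struct {
	mu    sync.RWMutex
	total int
}

// addAndRead increments total under the write lock, then downgrades to a read
// lock so other readers can proceed while this goroutine reads the new value.
func (s *stats) addAndRead(n int) int {
	s.mu.Lock()
	s.total += n
	s.mu.DowngradeLock() // atomically: write unlock + read lock
	defer s.mu.RUnlock()
	return s.total
}

// handoff locks a CrossGoroutineRWMutex on one goroutine and unlocks it on
// another, which RWMutex's noteLock/noteUnlock tracking would reject when
// built with the checklocks tag.
func handoff() {
	var m sync.CrossGoroutineRWMutex
	m.Lock()
	done := make(chan struct{})
	go func() {
		m.Unlock() // released by a goroutine other than the locker
		close(done)
	}()
	<-done
}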

View File

@@ -0,0 +1,119 @@
// Copyright 2019 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync
import (
"sync/atomic"
)
// SeqCount is a synchronization primitive for optimistic reader/writer
// synchronization in cases where readers can work with stale data and
// therefore do not need to block writers.
//
// Compared to sync/atomic.Value:
//
// - Mutation of SeqCount-protected data does not require memory allocation,
// whereas atomic.Value generally does. This is a significant advantage when
// writes are common.
//
// - Atomic reads of SeqCount-protected data require copying. This is a
// disadvantage when atomic reads are common.
//
// - SeqCount may be more flexible: correct use of SeqCount.ReadOk allows other
// operations to be made atomic with reads of SeqCount-protected data.
//
// - SeqCount is more cumbersome to use; atomic reads of SeqCount-protected
// data require instantiating function templates using go_generics (see
// seqatomic.go).
type SeqCount struct {
// epoch is incremented by BeginWrite and EndWrite, such that epoch is odd
// if a writer critical section is active, and a read from data protected
// by this SeqCount is atomic iff epoch is the same even value before and
// after the read.
epoch uint32
}
// SeqCountEpoch tracks writer critical sections in a SeqCount.
type SeqCountEpoch uint32
// BeginRead indicates the beginning of a reader critical section. Reader
// critical sections DO NOT BLOCK writer critical sections, so operations in a
// reader critical section MAY RACE with writer critical sections. Races are
// detected by ReadOk at the end of the reader critical section. Thus, the
// low-level structure of readers is generally:
//
// for {
// epoch := seq.BeginRead()
// // do something idempotent with seq-protected data
// if seq.ReadOk(epoch) {
// break
// }
// }
//
// However, since reader critical sections may race with writer critical
// sections, the Go race detector will (accurately) flag data races in readers
// using this pattern. Most users of SeqCount will need to use the
// SeqAtomicLoad function template in seqatomic.go.
func (s *SeqCount) BeginRead() SeqCountEpoch {
if epoch := atomic.LoadUint32(&s.epoch); epoch&1 == 0 {
return SeqCountEpoch(epoch)
}
return s.beginReadSlow()
}
func (s *SeqCount) beginReadSlow() SeqCountEpoch {
i := 0
for {
if canSpin(i) {
i++
doSpin()
} else {
goyield()
}
if epoch := atomic.LoadUint32(&s.epoch); epoch&1 == 0 {
return SeqCountEpoch(epoch)
}
}
}
// ReadOk returns true if the reader critical section initiated by a previous
// call to BeginRead() that returned epoch did not race with any writer critical
// sections.
//
// ReadOk may be called any number of times during a reader critical section.
// Reader critical sections do not need to be explicitly terminated; the last
// call to ReadOk is implicitly the end of the reader critical section.
func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool {
MemoryFenceReads()
return atomic.LoadUint32(&s.epoch) == uint32(epoch)
}
// BeginWrite indicates the beginning of a writer critical section.
//
// SeqCount does not support concurrent writer critical sections; clients with
// concurrent writers must synchronize them using e.g. sync.Mutex.
func (s *SeqCount) BeginWrite() {
if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 == 0 {
panic("SeqCount.BeginWrite during writer critical section")
}
}
// BeginWriteOk combines the semantics of ReadOk and BeginWrite. If the reader
// critical section initiated by a previous call to BeginRead() that returned
// epoch did not race with any writer critical sections, it begins a writer
// critical section and returns true. Otherwise it does nothing and returns
// false.
func (s *SeqCount) BeginWriteOk(epoch SeqCountEpoch) bool {
return atomic.CompareAndSwapUint32(&s.epoch, uint32(epoch), uint32(epoch)+1)
}
// EndWrite ends the effect of a preceding BeginWrite or successful
// BeginWriteOk.
func (s *SeqCount) EndWrite() {
if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 != 0 {
panic("SeqCount.EndWrite outside writer critical section")
}
}
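
A hedged sketch of the read/write pairing described above: writers are serialized with this package's Mutex (as the BeginWrite documentation requires) while readers retry optimistically. The point type is an assumption made for the example, and, as the BeginRead documentation warns, a plain reader like this one will be flagged by the race detector, so real readers generally go through the SeqAtomicLoad templates instead.

package example

import (
	"gvisor.dev/gvisor/pkg/sync"
)

// point is a hypothetical multi-word value protected by a SeqCount.
type point struct {
	seq  sync.SeqCount
	mu   sync.Mutex // serializes writers
	x, y int64
}

// set updates both coordinates; readers never observe a torn pair.
func (p *point) set(x, y int64) {
	p.mu.Lock()
	p.seq.BeginWrite()
	p.x, p.y = x, y
	p.seq.EndWrite()
	p.mu.Unlock()
}

// get reads both coordinates without blocking set, retrying if a writer
// raced with the read.
func (p *point) get() (int64, int64) {
	for {
		epoch := p.seq.BeginRead()
		x, y := p.x, p.y // may race with set; ReadOk detects this
		if p.seq.ReadOk(epoch) {
			return x, y
		}
	}
}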

View File

@@ -0,0 +1,9 @@
// Copyright 2019 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sync provides synchronization primitives.
//
// +checkalignedignore
package sync