Update dependencies
This commit is contained in:
289
vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go
vendored
Normal file
289
vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go
vendored
Normal file
@@ -0,0 +1,289 @@
|
||||
// Copyright 2022 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build arm || mips || mipsle || 386
|
||||
// +build arm mips mipsle 386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/sync"
|
||||
)
|
||||
|
||||
// Note that this file is *identical* to 32b_64bit.go, as go_stateify gets
|
||||
// confused about build tags if these are not separated.
|
||||
|
||||
// LINT.IfChange
|
||||
|
||||
// Int32 is an atomic int32.
|
||||
//
|
||||
// The default value is zero.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// +stateify savable
|
||||
type Int32 struct {
|
||||
_ sync.NoCopy
|
||||
value int32
|
||||
}
|
||||
|
||||
// FromInt32 returns an Int32 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromInt32(v int32) Int32 {
|
||||
return Int32{value: v}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Load() int32 {
|
||||
return atomic.LoadInt32(&i.value)
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) RacyLoad() int32 {
|
||||
return i.value
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Store(v int32) {
|
||||
atomic.StoreInt32(&i.value, v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) RacyStore(v int32) {
|
||||
i.value = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Add(v int32) int32 {
|
||||
return atomic.AddInt32(&i.value, v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) RacyAdd(v int32) int32 {
|
||||
i.value += v
|
||||
return i.value
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Swap(v int32) int32 {
|
||||
return atomic.SwapInt32(&i.value, v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) CompareAndSwap(oldVal, newVal int32) bool {
|
||||
return atomic.CompareAndSwapInt32(&i.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (i *Int32) ptr() *int32 {
|
||||
return &i.value
|
||||
}
|
||||
|
||||
// Uint32 is an atomic uint32.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// See aligned_unsafe.go in this directory for justification.
|
||||
//
|
||||
// +stateify savable
|
||||
type Uint32 struct {
|
||||
_ sync.NoCopy
|
||||
value uint32
|
||||
}
|
||||
|
||||
// FromUint32 returns an Uint32 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromUint32(v uint32) Uint32 {
|
||||
return Uint32{value: v}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Load() uint32 {
|
||||
return atomic.LoadUint32(&u.value)
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) RacyLoad() uint32 {
|
||||
return u.value
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Store(v uint32) {
|
||||
atomic.StoreUint32(&u.value, v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) RacyStore(v uint32) {
|
||||
u.value = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Add(v uint32) uint32 {
|
||||
return atomic.AddUint32(&u.value, v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) RacyAdd(v uint32) uint32 {
|
||||
u.value += v
|
||||
return u.value
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Swap(v uint32) uint32 {
|
||||
return atomic.SwapUint32(&u.value, v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) CompareAndSwap(oldVal, newVal uint32) bool {
|
||||
return atomic.CompareAndSwapUint32(&u.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (u *Uint32) ptr() *uint32 {
|
||||
return &u.value
|
||||
}
|
||||
|
||||
// Bool is an atomic Boolean.
|
||||
//
|
||||
// It is implemented by a Uint32, with value 0 indicating false, and 1
|
||||
// indicating true.
|
||||
//
|
||||
// +stateify savable
|
||||
type Bool struct {
|
||||
Uint32
|
||||
}
|
||||
|
||||
// b32 maps a bool onto its canonical uint32 encoding: 1 for true, 0 for
// false. Bool is stored this way inside its embedded Uint32.
func b32(b bool) uint32 {
	var v uint32
	if b {
		v = 1
	}
	return v
}
|
||||
|
||||
// FromBool returns a Bool initialized to value val.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromBool(val bool) Bool {
|
||||
return Bool{
|
||||
Uint32: FromUint32(b32(val)),
|
||||
}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadBool, if such a thing existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) Load() bool {
|
||||
return b.Uint32.Load() != 0
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) RacyLoad() bool {
|
||||
return b.Uint32.RacyLoad() != 0
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreBool, if such a thing existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) Store(val bool) {
|
||||
b.Uint32.Store(b32(val))
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) RacyStore(val bool) {
|
||||
b.Uint32.RacyStore(b32(val))
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapBool, if such a thing existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) Swap(val bool) bool {
|
||||
return b.Uint32.Swap(b32(val)) != 0
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
|
||||
// existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
|
||||
return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
|
||||
}
|
||||
|
||||
// LINT.ThenChange(32b_64bit.go)
|
||||
289
vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go
vendored
Normal file
289
vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go
vendored
Normal file
@@ -0,0 +1,289 @@
|
||||
// Copyright 2022 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !arm && !mips && !mipsle && !386
|
||||
// +build !arm,!mips,!mipsle,!386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/sync"
|
||||
)
|
||||
|
||||
// Note that this file is *identical* to 32b_32bit.go, as go_stateify gets
|
||||
// confused about build tags if these are not separated.
|
||||
|
||||
// LINT.IfChange
|
||||
|
||||
// Int32 is an atomic int32.
|
||||
//
|
||||
// The default value is zero.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// +stateify savable
|
||||
type Int32 struct {
|
||||
_ sync.NoCopy
|
||||
value int32
|
||||
}
|
||||
|
||||
// FromInt32 returns an Int32 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromInt32(v int32) Int32 {
|
||||
return Int32{value: v}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Load() int32 {
|
||||
return atomic.LoadInt32(&i.value)
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) RacyLoad() int32 {
|
||||
return i.value
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Store(v int32) {
|
||||
atomic.StoreInt32(&i.value, v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) RacyStore(v int32) {
|
||||
i.value = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Add(v int32) int32 {
|
||||
return atomic.AddInt32(&i.value, v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) RacyAdd(v int32) int32 {
|
||||
i.value += v
|
||||
return i.value
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) Swap(v int32) int32 {
|
||||
return atomic.SwapInt32(&i.value, v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapInt32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int32) CompareAndSwap(oldVal, newVal int32) bool {
|
||||
return atomic.CompareAndSwapInt32(&i.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (i *Int32) ptr() *int32 {
|
||||
return &i.value
|
||||
}
|
||||
|
||||
// Uint32 is an atomic uint32.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// See aligned_unsafe.go in this directory for justification.
|
||||
//
|
||||
// +stateify savable
|
||||
type Uint32 struct {
|
||||
_ sync.NoCopy
|
||||
value uint32
|
||||
}
|
||||
|
||||
// FromUint32 returns an Uint32 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromUint32(v uint32) Uint32 {
|
||||
return Uint32{value: v}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Load() uint32 {
|
||||
return atomic.LoadUint32(&u.value)
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) RacyLoad() uint32 {
|
||||
return u.value
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Store(v uint32) {
|
||||
atomic.StoreUint32(&u.value, v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) RacyStore(v uint32) {
|
||||
u.value = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Add(v uint32) uint32 {
|
||||
return atomic.AddUint32(&u.value, v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) RacyAdd(v uint32) uint32 {
|
||||
u.value += v
|
||||
return u.value
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) Swap(v uint32) uint32 {
|
||||
return atomic.SwapUint32(&u.value, v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapUint32.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint32) CompareAndSwap(oldVal, newVal uint32) bool {
|
||||
return atomic.CompareAndSwapUint32(&u.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (u *Uint32) ptr() *uint32 {
|
||||
return &u.value
|
||||
}
|
||||
|
||||
// Bool is an atomic Boolean.
|
||||
//
|
||||
// It is implemented by a Uint32, with value 0 indicating false, and 1
|
||||
// indicating true.
|
||||
//
|
||||
// +stateify savable
|
||||
type Bool struct {
|
||||
Uint32
|
||||
}
|
||||
|
||||
// b32 maps a bool onto its canonical uint32 encoding: 1 for true, 0 for
// false. Bool is stored this way inside its embedded Uint32.
func b32(b bool) uint32 {
	var v uint32
	if b {
		v = 1
	}
	return v
}
|
||||
|
||||
// FromBool returns a Bool initialized to value val.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromBool(val bool) Bool {
|
||||
return Bool{
|
||||
Uint32: FromUint32(b32(val)),
|
||||
}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadBool, if such a thing existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) Load() bool {
|
||||
return b.Uint32.Load() != 0
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) RacyLoad() bool {
|
||||
return b.Uint32.RacyLoad() != 0
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreBool, if such a thing existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) Store(val bool) {
|
||||
b.Uint32.Store(b32(val))
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) RacyStore(val bool) {
|
||||
b.Uint32.RacyStore(b32(val))
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapBool, if such a thing existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) Swap(val bool) bool {
|
||||
return b.Uint32.Swap(b32(val)) != 0
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
|
||||
// existed.
|
||||
//
|
||||
//go:nosplit
|
||||
func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
|
||||
return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
|
||||
}
|
||||
|
||||
// LINT.ThenChange(32b_32bit.go)
|
||||
231
vendor/gvisor.dev/gvisor/pkg/atomicbitops/aligned_32bit_unsafe.go
vendored
Normal file
231
vendor/gvisor.dev/gvisor/pkg/atomicbitops/aligned_32bit_unsafe.go
vendored
Normal file
@@ -0,0 +1,231 @@
|
||||
// Copyright 2021 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build arm || mips || mipsle || 386
|
||||
// +build arm mips mipsle 386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/sync"
|
||||
)
|
||||
|
||||
// Int64 is an atomic int64 that is guaranteed to be 64-bit
|
||||
// aligned, even on 32-bit systems.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// Per https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
|
||||
//
|
||||
// "On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
|
||||
// for 64-bit alignment of 64-bit words accessed atomically. The first word in
|
||||
// a variable or in an allocated struct, array, or slice can be relied upon to
|
||||
// be 64-bit aligned."
|
||||
//
|
||||
// +stateify savable
|
||||
type Int64 struct {
|
||||
_ sync.NoCopy
|
||||
value int64
|
||||
value32 int32
|
||||
}
|
||||
|
||||
// ptr returns a 64-bit-aligned pointer to the 8 usable bytes of i's payload.
// All atomic 64-bit operations on Int64 must go through this pointer.
//
//go:nosplit
func (i *Int64) ptr() *int64 {
	// On 32-bit systems, i.value is only guaranteed to be 32-bit aligned, but
	// the 12 bytes spanned by i.value and i.value32 are guaranteed to contain
	// 8 contiguous bytes at a 64-bit-aligned address. Rounding the address
	// (&i.value + 4) down to a multiple of 8 selects exactly those bytes.
	return (*int64)(unsafe.Pointer((uintptr(unsafe.Pointer(&i.value)) + 4) &^ 7))
}
|
||||
|
||||
// FromInt64 returns an Int64 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromInt64(v int64) Int64 {
|
||||
var i Int64
|
||||
*i.ptr() = v
|
||||
return i
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Load() int64 {
|
||||
return atomic.LoadInt64(i.ptr())
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) RacyLoad() int64 {
|
||||
return *i.ptr()
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Store(v int64) {
|
||||
atomic.StoreInt64(i.ptr(), v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) RacyStore(v int64) {
|
||||
*i.ptr() = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Add(v int64) int64 {
|
||||
return atomic.AddInt64(i.ptr(), v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) RacyAdd(v int64) int64 {
|
||||
*i.ptr() += v
|
||||
return *i.ptr()
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Swap(v int64) int64 {
|
||||
return atomic.SwapInt64(i.ptr(), v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) CompareAndSwap(oldVal, newVal int64) bool {
|
||||
return atomic.CompareAndSwapInt64(&i.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
// Uint64 is an atomic uint64 that is guaranteed to be 64-bit
|
||||
// aligned, even on 32-bit systems.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// Per https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
|
||||
//
|
||||
// "On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
|
||||
// for 64-bit alignment of 64-bit words accessed atomically. The first word in
|
||||
// a variable or in an allocated struct, array, or slice can be relied upon to
|
||||
// be 64-bit aligned."
|
||||
//
|
||||
// +stateify savable
|
||||
type Uint64 struct {
|
||||
_ sync.NoCopy
|
||||
value uint64
|
||||
value32 uint32
|
||||
}
|
||||
|
||||
// ptr returns a 64-bit-aligned pointer to the 8 usable bytes of u's payload.
// All atomic 64-bit operations on Uint64 must go through this pointer.
//
//go:nosplit
func (u *Uint64) ptr() *uint64 {
	// On 32-bit systems, u.value is only guaranteed to be 32-bit aligned, but
	// the 12 bytes spanned by u.value and u.value32 are guaranteed to contain
	// 8 contiguous bytes at a 64-bit-aligned address. Rounding the address
	// (&u.value + 4) down to a multiple of 8 selects exactly those bytes.
	// (The original comment said "i.value"; the receiver here is u.)
	return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&u.value)) + 4) &^ 7))
}
|
||||
|
||||
// FromUint64 returns an Uint64 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromUint64(v uint64) Uint64 {
|
||||
var u Uint64
|
||||
*u.ptr() = v
|
||||
return u
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Load() uint64 {
|
||||
return atomic.LoadUint64(u.ptr())
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) RacyLoad() uint64 {
|
||||
return *u.ptr()
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Store(v uint64) {
|
||||
atomic.StoreUint64(u.ptr(), v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) RacyStore(v uint64) {
|
||||
*u.ptr() = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Add(v uint64) uint64 {
|
||||
return atomic.AddUint64(u.ptr(), v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) RacyAdd(v uint64) uint64 {
|
||||
*u.ptr() += v
|
||||
return *u.ptr()
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Swap(v uint64) uint64 {
|
||||
return atomic.SwapUint64(u.ptr(), v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) CompareAndSwap(oldVal, newVal uint64) bool {
|
||||
return atomic.CompareAndSwapUint64(u.ptr(), oldVal, newVal)
|
||||
}
|
||||
212
vendor/gvisor.dev/gvisor/pkg/atomicbitops/aligned_64bit.go
vendored
Normal file
212
vendor/gvisor.dev/gvisor/pkg/atomicbitops/aligned_64bit.go
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
// Copyright 2021 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !arm && !mips && !mipsle && !386
|
||||
// +build !arm,!mips,!mipsle,!386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/sync"
|
||||
)
|
||||
|
||||
// Int64 is an atomic int64 that is guaranteed to be 64-bit
|
||||
// aligned, even on 32-bit systems. On most architectures, it's just a regular
|
||||
// int64.
|
||||
//
|
||||
// The default value is zero.
|
||||
//
|
||||
// Don't add fields to this struct. It is important that it remain the same
|
||||
// size as its builtin analogue.
|
||||
//
|
||||
// See aligned_32bit_unsafe.go in this directory for justification.
|
||||
//
|
||||
// +stateify savable
|
||||
type Int64 struct {
|
||||
_ sync.NoCopy
|
||||
value int64
|
||||
}
|
||||
|
||||
// FromInt64 returns an Int64 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromInt64(v int64) Int64 {
|
||||
return Int64{value: v}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Load() int64 {
|
||||
return atomic.LoadInt64(&i.value)
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) RacyLoad() int64 {
|
||||
return i.value
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Store(v int64) {
|
||||
atomic.StoreInt64(&i.value, v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) RacyStore(v int64) {
|
||||
i.value = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Add(v int64) int64 {
|
||||
return atomic.AddInt64(&i.value, v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) RacyAdd(v int64) int64 {
|
||||
i.value += v
|
||||
return i.value
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) Swap(v int64) int64 {
|
||||
return atomic.SwapInt64(&i.value, v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapInt64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (i *Int64) CompareAndSwap(oldVal, newVal int64) bool {
|
||||
return atomic.CompareAndSwapInt64(&i.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (i *Int64) ptr() *int64 {
|
||||
return &i.value
|
||||
}
|
||||
|
||||
// Uint64 is an atomic uint64 that is guaranteed to be 64-bit
// aligned, even on 32-bit systems. On most architectures, it's just a regular
// uint64.
//
// The default value is zero.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_32bit_unsafe.go in this directory for justification.
//
// +stateify savable
type Uint64 struct {
	_     sync.NoCopy
	value uint64
}
|
||||
|
||||
// FromUint64 returns an Uint64 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromUint64(v uint64) Uint64 {
|
||||
return Uint64{value: v}
|
||||
}
|
||||
|
||||
// Load is analogous to atomic.LoadUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Load() uint64 {
|
||||
return atomic.LoadUint64(&u.value)
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) RacyLoad() uint64 {
|
||||
return u.value
|
||||
}
|
||||
|
||||
// Store is analogous to atomic.StoreUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Store(v uint64) {
|
||||
atomic.StoreUint64(&u.value, v)
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) RacyStore(v uint64) {
|
||||
u.value = v
|
||||
}
|
||||
|
||||
// Add is analogous to atomic.AddUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Add(v uint64) uint64 {
|
||||
return atomic.AddUint64(&u.value, v)
|
||||
}
|
||||
|
||||
// RacyAdd is analogous to adding to an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) RacyAdd(v uint64) uint64 {
|
||||
u.value += v
|
||||
return u.value
|
||||
}
|
||||
|
||||
// Swap is analogous to atomic.SwapUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) Swap(v uint64) uint64 {
|
||||
return atomic.SwapUint64(&u.value, v)
|
||||
}
|
||||
|
||||
// CompareAndSwap is analogous to atomic.CompareAndSwapUint64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (u *Uint64) CompareAndSwap(oldVal, newVal uint64) bool {
|
||||
return atomic.CompareAndSwapUint64(&u.value, oldVal, newVal)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (u *Uint64) ptr() *uint64 {
|
||||
return &u.value
|
||||
}
|
||||
82
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops.go
vendored
Normal file
82
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops.go
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
// Copyright 2018 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build amd64 || arm64
|
||||
// +build amd64 arm64
|
||||
|
||||
// Package atomicbitops provides extensions to the sync/atomic package.
|
||||
//
|
||||
// All read-modify-write operations implemented by this package have
|
||||
// acquire-release memory ordering (like sync/atomic).
|
||||
//
|
||||
// +checkalignedignore
|
||||
package atomicbitops
|
||||
|
||||
// AndUint32 atomically applies bitwise AND operation to *addr with val.
|
||||
func AndUint32(addr *Uint32, val uint32) {
|
||||
andUint32(&addr.value, val)
|
||||
}
|
||||
|
||||
func andUint32(addr *uint32, val uint32)
|
||||
|
||||
// OrUint32 atomically applies bitwise OR operation to *addr with val.
|
||||
func OrUint32(addr *Uint32, val uint32) {
|
||||
orUint32(&addr.value, val)
|
||||
}
|
||||
|
||||
func orUint32(addr *uint32, val uint32)
|
||||
|
||||
// XorUint32 atomically applies bitwise XOR operation to *addr with val.
|
||||
func XorUint32(addr *Uint32, val uint32) {
|
||||
xorUint32(&addr.value, val)
|
||||
}
|
||||
|
||||
func xorUint32(addr *uint32, val uint32)
|
||||
|
||||
// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
|
||||
// the value previously stored at addr.
|
||||
func CompareAndSwapUint32(addr *Uint32, old, new uint32) uint32 {
|
||||
return compareAndSwapUint32(&addr.value, old, new)
|
||||
}
|
||||
|
||||
func compareAndSwapUint32(addr *uint32, old, new uint32) uint32
|
||||
|
||||
// AndUint64 atomically applies bitwise AND operation to *addr with val.
|
||||
func AndUint64(addr *Uint64, val uint64) {
|
||||
andUint64(&addr.value, val)
|
||||
}
|
||||
|
||||
func andUint64(addr *uint64, val uint64)
|
||||
|
||||
// OrUint64 atomically applies bitwise OR operation to *addr with val.
|
||||
func OrUint64(addr *Uint64, val uint64) {
|
||||
orUint64(&addr.value, val)
|
||||
}
|
||||
|
||||
func orUint64(addr *uint64, val uint64)
|
||||
|
||||
// XorUint64 atomically applies bitwise XOR operation to *addr with val.
|
||||
func XorUint64(addr *Uint64, val uint64) {
|
||||
xorUint64(&addr.value, val)
|
||||
}
|
||||
|
||||
func xorUint64(addr *uint64, val uint64)
|
||||
|
||||
// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
|
||||
// the value previously stored at addr.
|
||||
func CompareAndSwapUint64(addr *Uint64, old, new uint64) uint64 {
|
||||
return compareAndSwapUint64(&addr.value, old, new)
|
||||
}
|
||||
|
||||
func compareAndSwapUint64(addr *uint64, old, new uint64) uint64
|
||||
93
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go
vendored
Normal file
93
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
// automatically generated by stateify.
|
||||
|
||||
//go:build arm || mips || mipsle || 386
|
||||
// +build arm mips mipsle 386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/state"
|
||||
)
|
||||
|
||||
func (i *Int32) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Int32"
|
||||
}
|
||||
|
||||
func (i *Int32) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Int32) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int32) StateSave(stateSinkObject state.Sink) {
|
||||
i.beforeSave()
|
||||
stateSinkObject.Save(0, &i.value)
|
||||
}
|
||||
|
||||
func (i *Int32) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &i.value)
|
||||
}
|
||||
|
||||
func (u *Uint32) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Uint32"
|
||||
}
|
||||
|
||||
func (u *Uint32) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Uint32) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint32) StateSave(stateSinkObject state.Sink) {
|
||||
u.beforeSave()
|
||||
stateSinkObject.Save(0, &u.value)
|
||||
}
|
||||
|
||||
func (u *Uint32) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &u.value)
|
||||
}
|
||||
|
||||
func (b *Bool) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Bool"
|
||||
}
|
||||
|
||||
func (b *Bool) StateFields() []string {
|
||||
return []string{
|
||||
"Uint32",
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Bool) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (b *Bool) StateSave(stateSinkObject state.Sink) {
|
||||
b.beforeSave()
|
||||
stateSinkObject.Save(0, &b.Uint32)
|
||||
}
|
||||
|
||||
func (b *Bool) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &b.Uint32)
|
||||
}
|
||||
|
||||
func init() {
|
||||
state.Register((*Int32)(nil))
|
||||
state.Register((*Uint32)(nil))
|
||||
state.Register((*Bool)(nil))
|
||||
}
|
||||
73
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
vendored
Normal file
73
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
// automatically generated by stateify.
|
||||
|
||||
//go:build arm || mips || mipsle || 386
|
||||
// +build arm mips mipsle 386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/state"
|
||||
)
|
||||
|
||||
func (i *Int64) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Int64"
|
||||
}
|
||||
|
||||
func (i *Int64) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
"value32",
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Int64) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int64) StateSave(stateSinkObject state.Sink) {
|
||||
i.beforeSave()
|
||||
stateSinkObject.Save(0, &i.value)
|
||||
stateSinkObject.Save(1, &i.value32)
|
||||
}
|
||||
|
||||
func (i *Int64) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &i.value)
|
||||
stateSourceObject.Load(1, &i.value32)
|
||||
}
|
||||
|
||||
func (u *Uint64) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Uint64"
|
||||
}
|
||||
|
||||
func (u *Uint64) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
"value32",
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Uint64) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint64) StateSave(stateSinkObject state.Sink) {
|
||||
u.beforeSave()
|
||||
stateSinkObject.Save(0, &u.value)
|
||||
stateSinkObject.Save(1, &u.value32)
|
||||
}
|
||||
|
||||
func (u *Uint64) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &u.value)
|
||||
stateSourceObject.Load(1, &u.value32)
|
||||
}
|
||||
|
||||
func init() {
|
||||
state.Register((*Int64)(nil))
|
||||
state.Register((*Uint64)(nil))
|
||||
}
|
||||
145
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
vendored
Normal file
145
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// automatically generated by stateify.
|
||||
|
||||
//go:build !arm && !mips && !mipsle && !386 && !arm && !mips && !mipsle && !386
|
||||
// +build !arm,!mips,!mipsle,!386,!arm,!mips,!mipsle,!386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/state"
|
||||
)
|
||||
|
||||
func (i *Int32) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Int32"
|
||||
}
|
||||
|
||||
func (i *Int32) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Int32) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int32) StateSave(stateSinkObject state.Sink) {
|
||||
i.beforeSave()
|
||||
stateSinkObject.Save(0, &i.value)
|
||||
}
|
||||
|
||||
func (i *Int32) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &i.value)
|
||||
}
|
||||
|
||||
func (u *Uint32) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Uint32"
|
||||
}
|
||||
|
||||
func (u *Uint32) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Uint32) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint32) StateSave(stateSinkObject state.Sink) {
|
||||
u.beforeSave()
|
||||
stateSinkObject.Save(0, &u.value)
|
||||
}
|
||||
|
||||
func (u *Uint32) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &u.value)
|
||||
}
|
||||
|
||||
func (b *Bool) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Bool"
|
||||
}
|
||||
|
||||
func (b *Bool) StateFields() []string {
|
||||
return []string{
|
||||
"Uint32",
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Bool) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (b *Bool) StateSave(stateSinkObject state.Sink) {
|
||||
b.beforeSave()
|
||||
stateSinkObject.Save(0, &b.Uint32)
|
||||
}
|
||||
|
||||
func (b *Bool) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &b.Uint32)
|
||||
}
|
||||
|
||||
func (i *Int64) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Int64"
|
||||
}
|
||||
|
||||
func (i *Int64) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Int64) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int64) StateSave(stateSinkObject state.Sink) {
|
||||
i.beforeSave()
|
||||
stateSinkObject.Save(0, &i.value)
|
||||
}
|
||||
|
||||
func (i *Int64) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &i.value)
|
||||
}
|
||||
|
||||
func (u *Uint64) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Uint64"
|
||||
}
|
||||
|
||||
func (u *Uint64) StateFields() []string {
|
||||
return []string{
|
||||
"value",
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Uint64) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint64) StateSave(stateSinkObject state.Sink) {
|
||||
u.beforeSave()
|
||||
stateSinkObject.Save(0, &u.value)
|
||||
}
|
||||
|
||||
func (u *Uint64) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &u.value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
state.Register((*Int32)(nil))
|
||||
state.Register((*Uint32)(nil))
|
||||
state.Register((*Bool)(nil))
|
||||
state.Register((*Int64)(nil))
|
||||
state.Register((*Uint64)(nil))
|
||||
}
|
||||
77
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s
vendored
Normal file
77
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2018 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build amd64
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·andUint32(SB),NOSPLIT|NOFRAME,$0-12
|
||||
MOVQ addr+0(FP), BX
|
||||
MOVL val+8(FP), AX
|
||||
LOCK
|
||||
ANDL AX, 0(BX)
|
||||
RET
|
||||
|
||||
TEXT ·orUint32(SB),NOSPLIT|NOFRAME,$0-12
|
||||
MOVQ addr+0(FP), BX
|
||||
MOVL val+8(FP), AX
|
||||
LOCK
|
||||
ORL AX, 0(BX)
|
||||
RET
|
||||
|
||||
TEXT ·xorUint32(SB),NOSPLIT|NOFRAME,$0-12
|
||||
MOVQ addr+0(FP), BX
|
||||
MOVL val+8(FP), AX
|
||||
LOCK
|
||||
XORL AX, 0(BX)
|
||||
RET
|
||||
|
||||
TEXT ·compareAndSwapUint32(SB),NOSPLIT|NOFRAME,$0-20
|
||||
MOVQ addr+0(FP), DI
|
||||
MOVL old+8(FP), AX
|
||||
MOVL new+12(FP), DX
|
||||
LOCK
|
||||
CMPXCHGL DX, 0(DI)
|
||||
MOVL AX, ret+16(FP)
|
||||
RET
|
||||
|
||||
TEXT ·andUint64(SB),NOSPLIT|NOFRAME,$0-16
|
||||
MOVQ addr+0(FP), BX
|
||||
MOVQ val+8(FP), AX
|
||||
LOCK
|
||||
ANDQ AX, 0(BX)
|
||||
RET
|
||||
|
||||
TEXT ·orUint64(SB),NOSPLIT|NOFRAME,$0-16
|
||||
MOVQ addr+0(FP), BX
|
||||
MOVQ val+8(FP), AX
|
||||
LOCK
|
||||
ORQ AX, 0(BX)
|
||||
RET
|
||||
|
||||
TEXT ·xorUint64(SB),NOSPLIT|NOFRAME,$0-16
|
||||
MOVQ addr+0(FP), BX
|
||||
MOVQ val+8(FP), AX
|
||||
LOCK
|
||||
XORQ AX, 0(BX)
|
||||
RET
|
||||
|
||||
TEXT ·compareAndSwapUint64(SB),NOSPLIT|NOFRAME,$0-32
|
||||
MOVQ addr+0(FP), DI
|
||||
MOVQ old+8(FP), AX
|
||||
MOVQ new+16(FP), DX
|
||||
LOCK
|
||||
CMPXCHGQ DX, 0(DI)
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
40
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.go
vendored
Normal file
40
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.go
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2022 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build arm64
|
||||
// +build arm64
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"golang.org/x/sys/cpu"
|
||||
"gvisor.dev/gvisor/pkg/cpuid"
|
||||
)
|
||||
|
||||
var arm64HasATOMICS bool
|
||||
|
||||
func init() {
|
||||
// The gvisor cpuid package only works on Linux.
|
||||
// For all other operating systems, use Go's x/sys/cpu package
|
||||
// to get the one bit we care about here.
|
||||
//
|
||||
// See https://github.com/google/gvisor/issues/7849.
|
||||
if runtime.GOOS == "linux" {
|
||||
arm64HasATOMICS = cpuid.HostFeatureSet().HasFeature(cpuid.ARM64FeatureATOMICS)
|
||||
} else {
|
||||
arm64HasATOMICS = cpu.ARM64.HasATOMICS
|
||||
}
|
||||
}
|
||||
141
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s
vendored
Normal file
141
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
// Copyright 2019 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build arm64
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·andUint32(SB),NOSPLIT,$0-12
|
||||
MOVD addr+0(FP), R0
|
||||
MOVW val+8(FP), R1
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
MVN R1, R2
|
||||
LDCLRALW R2, (R0), R3
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXRW (R0), R2
|
||||
ANDW R1, R2
|
||||
STLXRW R2, (R0), R3
|
||||
CBNZ R3, load_store_loop
|
||||
RET
|
||||
|
||||
TEXT ·orUint32(SB),NOSPLIT,$0-12
|
||||
MOVD addr+0(FP), R0
|
||||
MOVW val+8(FP), R1
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
LDORALW R1, (R0), R2
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXRW (R0), R2
|
||||
ORRW R1, R2
|
||||
STLXRW R2, (R0), R3
|
||||
CBNZ R3, load_store_loop
|
||||
RET
|
||||
|
||||
TEXT ·xorUint32(SB),NOSPLIT,$0-12
|
||||
MOVD addr+0(FP), R0
|
||||
MOVW val+8(FP), R1
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
LDEORALW R1, (R0), R2
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXRW (R0), R2
|
||||
EORW R1, R2
|
||||
STLXRW R2, (R0), R3
|
||||
CBNZ R3, load_store_loop
|
||||
RET
|
||||
|
||||
TEXT ·compareAndSwapUint32(SB),NOSPLIT,$0-20
|
||||
MOVD addr+0(FP), R0
|
||||
MOVW old+8(FP), R1
|
||||
MOVW new+12(FP), R2
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
CASALW R1, (R0), R2
|
||||
MOVW R1, ret+16(FP)
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXRW (R0), R3
|
||||
CMPW R1, R3
|
||||
BNE ok
|
||||
STLXRW R2, (R0), R4
|
||||
CBNZ R4, load_store_loop
|
||||
ok:
|
||||
MOVW R3, ret+16(FP)
|
||||
RET
|
||||
|
||||
TEXT ·andUint64(SB),NOSPLIT,$0-16
|
||||
MOVD addr+0(FP), R0
|
||||
MOVD val+8(FP), R1
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
MVN R1, R2
|
||||
LDCLRALD R2, (R0), R3
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXR (R0), R2
|
||||
AND R1, R2
|
||||
STLXR R2, (R0), R3
|
||||
CBNZ R3, load_store_loop
|
||||
RET
|
||||
|
||||
TEXT ·orUint64(SB),NOSPLIT,$0-16
|
||||
MOVD addr+0(FP), R0
|
||||
MOVD val+8(FP), R1
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
LDORALD R1, (R0), R2
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXR (R0), R2
|
||||
ORR R1, R2
|
||||
STLXR R2, (R0), R3
|
||||
CBNZ R3, load_store_loop
|
||||
RET
|
||||
|
||||
TEXT ·xorUint64(SB),NOSPLIT,$0-16
|
||||
MOVD addr+0(FP), R0
|
||||
MOVD val+8(FP), R1
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
LDEORALD R1, (R0), R2
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXR (R0), R2
|
||||
EOR R1, R2
|
||||
STLXR R2, (R0), R3
|
||||
CBNZ R3, load_store_loop
|
||||
RET
|
||||
|
||||
TEXT ·compareAndSwapUint64(SB),NOSPLIT,$0-32
|
||||
MOVD addr+0(FP), R0
|
||||
MOVD old+8(FP), R1
|
||||
MOVD new+16(FP), R2
|
||||
MOVBU ·arm64HasATOMICS(SB), R4
|
||||
CBZ R4, load_store_loop
|
||||
CASALD R1, (R0), R2
|
||||
MOVD R1, ret+24(FP)
|
||||
RET
|
||||
load_store_loop:
|
||||
LDAXR (R0), R3
|
||||
CMP R1, R3
|
||||
BNE ok
|
||||
STLXR R2, (R0), R4
|
||||
CBNZ R4, load_store_loop
|
||||
ok:
|
||||
MOVD R3, ret+24(FP)
|
||||
RET
|
||||
6
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64_state_autogen.go
vendored
Normal file
6
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64_state_autogen.go
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
// automatically generated by stateify.
|
||||
|
||||
//go:build arm64
|
||||
// +build arm64
|
||||
|
||||
package atomicbitops
|
||||
105
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_float64.go
vendored
Normal file
105
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_float64.go
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
// Copyright 2023 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/sync"
|
||||
)
|
||||
|
||||
// Float64 is an atomic 64-bit floating-point number.
|
||||
//
|
||||
// +stateify savable
|
||||
type Float64 struct {
|
||||
_ sync.NoCopy
|
||||
// bits stores the bit of a 64-bit floating point number.
|
||||
// It is not (and should not be interpreted as) a real uint64.
|
||||
bits Uint64
|
||||
}
|
||||
|
||||
// FromFloat64 returns a Float64 initialized to value v.
|
||||
//
|
||||
//go:nosplit
|
||||
func FromFloat64(v float64) Float64 {
|
||||
return Float64{bits: FromUint64(math.Float64bits(v))}
|
||||
}
|
||||
|
||||
// Load loads the floating-point value.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) Load() float64 {
|
||||
return math.Float64frombits(f.bits.Load())
|
||||
}
|
||||
|
||||
// RacyLoad is analogous to reading an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) RacyLoad() float64 {
|
||||
return math.Float64frombits(f.bits.RacyLoad())
|
||||
}
|
||||
|
||||
// Store stores the given floating-point value in the Float64.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) Store(v float64) {
|
||||
f.bits.Store(math.Float64bits(v))
|
||||
}
|
||||
|
||||
// RacyStore is analogous to setting an atomic value without using
|
||||
// synchronization.
|
||||
//
|
||||
// It may be helpful to document why a racy operation is permitted.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) RacyStore(v float64) {
|
||||
f.bits.RacyStore(math.Float64bits(v))
|
||||
}
|
||||
|
||||
// Swap stores the given value and returns the previously-stored one.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) Swap(v float64) float64 {
|
||||
return math.Float64frombits(f.bits.Swap(math.Float64bits(v)))
|
||||
}
|
||||
|
||||
// CompareAndSwap does a compare-and-swap operation on the float64 value.
|
||||
// Note that unlike typical IEEE 754 semantics, this function will treat NaN
|
||||
// as equal to itself if all of its bits exactly match.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) CompareAndSwap(oldVal, newVal float64) bool {
|
||||
return f.bits.CompareAndSwap(math.Float64bits(oldVal), math.Float64bits(newVal))
|
||||
}
|
||||
|
||||
// Add increments the float by the given value.
|
||||
// Note that unlike an atomic integer, this requires spin-looping until we win
|
||||
// the compare-and-swap race, so this may take an indeterminate amount of time.
|
||||
//
|
||||
//go:nosplit
|
||||
func (f *Float64) Add(v float64) {
|
||||
// We do a racy load here because we optimistically think it may pass the
|
||||
// compare-and-swap operation. If it doesn't, we'll load it safely, so this
|
||||
// is OK and not a race for the overall intent of the user to add a number.
|
||||
sync.RaceDisable()
|
||||
oldVal := f.RacyLoad()
|
||||
for !f.CompareAndSwap(oldVal, oldVal+v) {
|
||||
oldVal = f.Load()
|
||||
}
|
||||
sync.RaceEnable()
|
||||
}
|
||||
112
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_noasm.go
vendored
Normal file
112
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_noasm.go
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2018 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !amd64 && !arm64
|
||||
// +build !amd64,!arm64
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
//go:nosplit
|
||||
func AndUint32(addr *Uint32, val uint32) {
|
||||
for {
|
||||
o := addr.Load()
|
||||
n := o & val
|
||||
if atomic.CompareAndSwapUint32(&addr.value, o, n) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func OrUint32(addr *Uint32, val uint32) {
|
||||
for {
|
||||
o := addr.Load()
|
||||
n := o | val
|
||||
if atomic.CompareAndSwapUint32(&addr.value, o, n) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func XorUint32(addr *Uint32, val uint32) {
|
||||
for {
|
||||
o := addr.Load()
|
||||
n := o ^ val
|
||||
if atomic.CompareAndSwapUint32(&addr.value, o, n) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func CompareAndSwapUint32(addr *Uint32, old, new uint32) (prev uint32) {
|
||||
for {
|
||||
prev = addr.Load()
|
||||
if prev != old {
|
||||
return
|
||||
}
|
||||
if atomic.CompareAndSwapUint32(&addr.value, old, new) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func AndUint64(addr *Uint64, val uint64) {
|
||||
for {
|
||||
o := atomic.LoadUint64(addr.ptr())
|
||||
n := o & val
|
||||
if atomic.CompareAndSwapUint64(addr.ptr(), o, n) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func OrUint64(addr *Uint64, val uint64) {
|
||||
for {
|
||||
o := atomic.LoadUint64(addr.ptr())
|
||||
n := o | val
|
||||
if atomic.CompareAndSwapUint64(addr.ptr(), o, n) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func XorUint64(addr *Uint64, val uint64) {
|
||||
for {
|
||||
o := atomic.LoadUint64(addr.ptr())
|
||||
n := o ^ val
|
||||
if atomic.CompareAndSwapUint64(addr.ptr(), o, n) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func CompareAndSwapUint64(addr *Uint64, old, new uint64) (prev uint64) {
|
||||
for {
|
||||
prev = atomic.LoadUint64(addr.ptr())
|
||||
if prev != old {
|
||||
return
|
||||
}
|
||||
if atomic.CompareAndSwapUint64(addr.ptr(), old, new) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
43
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go
vendored
Normal file
43
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// automatically generated by stateify.
|
||||
|
||||
//go:build (amd64 || arm64) && !amd64 && !arm64
|
||||
// +build amd64 arm64
|
||||
// +build !amd64
|
||||
// +build !arm64
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/state"
|
||||
)
|
||||
|
||||
func (f *Float64) StateTypeName() string {
|
||||
return "pkg/atomicbitops.Float64"
|
||||
}
|
||||
|
||||
func (f *Float64) StateFields() []string {
|
||||
return []string{
|
||||
"bits",
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Float64) beforeSave() {}
|
||||
|
||||
// +checklocksignore
|
||||
func (f *Float64) StateSave(stateSinkObject state.Sink) {
|
||||
f.beforeSave()
|
||||
stateSinkObject.Save(0, &f.bits)
|
||||
}
|
||||
|
||||
func (f *Float64) afterLoad(context.Context) {}
|
||||
|
||||
// +checklocksignore
|
||||
func (f *Float64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
|
||||
stateSourceObject.Load(0, &f.bits)
|
||||
}
|
||||
|
||||
func init() {
|
||||
state.Register((*Float64)(nil))
|
||||
}
|
||||
Reference in New Issue
Block a user