Update dependencies
8  vendor/gvisor.dev/gvisor/AUTHORS  vendored  Normal file
@@ -0,0 +1,8 @@
# This is the list of gVisor authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
#
# Please send a patch if you would like to be included in this list.
Google LLC
224  vendor/gvisor.dev/gvisor/LICENSE  vendored  Normal file
@@ -0,0 +1,224 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

------------------

Some files carry the following license, noted at the top of each file:

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
   THE SOFTWARE.
289  vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go  vendored  Normal file
@@ -0,0 +1,289 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm || mips || mipsle || 386
// +build arm mips mipsle 386

package atomicbitops

import (
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/sync"
)

// Note that this file is *identical* to 32b_64bit.go, as go_stateify gets
// confused about build tags if these are not separated.

// LINT.IfChange

// Int32 is an atomic int32.
//
// The default value is zero.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// +stateify savable
type Int32 struct {
	_     sync.NoCopy
	value int32
}

// FromInt32 returns an Int32 initialized to value v.
//
//go:nosplit
func FromInt32(v int32) Int32 {
	return Int32{value: v}
}

// Load is analogous to atomic.LoadInt32.
//
//go:nosplit
func (i *Int32) Load() int32 {
	return atomic.LoadInt32(&i.value)
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int32) RacyLoad() int32 {
	return i.value
}

// Store is analogous to atomic.StoreInt32.
//
//go:nosplit
func (i *Int32) Store(v int32) {
	atomic.StoreInt32(&i.value, v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int32) RacyStore(v int32) {
	i.value = v
}

// Add is analogous to atomic.AddInt32.
//
//go:nosplit
func (i *Int32) Add(v int32) int32 {
	return atomic.AddInt32(&i.value, v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int32) RacyAdd(v int32) int32 {
	i.value += v
	return i.value
}

// Swap is analogous to atomic.SwapInt32.
//
//go:nosplit
func (i *Int32) Swap(v int32) int32 {
	return atomic.SwapInt32(&i.value, v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapInt32.
//
//go:nosplit
func (i *Int32) CompareAndSwap(oldVal, newVal int32) bool {
	return atomic.CompareAndSwapInt32(&i.value, oldVal, newVal)
}

//go:nosplit
func (i *Int32) ptr() *int32 {
	return &i.value
}

// Uint32 is an atomic uint32.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_unsafe.go in this directory for justification.
//
// +stateify savable
type Uint32 struct {
	_     sync.NoCopy
	value uint32
}

// FromUint32 returns an Uint32 initialized to value v.
//
//go:nosplit
func FromUint32(v uint32) Uint32 {
	return Uint32{value: v}
}

// Load is analogous to atomic.LoadUint32.
//
//go:nosplit
func (u *Uint32) Load() uint32 {
	return atomic.LoadUint32(&u.value)
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint32) RacyLoad() uint32 {
	return u.value
}

// Store is analogous to atomic.StoreUint32.
//
//go:nosplit
func (u *Uint32) Store(v uint32) {
	atomic.StoreUint32(&u.value, v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint32) RacyStore(v uint32) {
	u.value = v
}

// Add is analogous to atomic.AddUint32.
//
//go:nosplit
func (u *Uint32) Add(v uint32) uint32 {
	return atomic.AddUint32(&u.value, v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint32) RacyAdd(v uint32) uint32 {
	u.value += v
	return u.value
}

// Swap is analogous to atomic.SwapUint32.
//
//go:nosplit
func (u *Uint32) Swap(v uint32) uint32 {
	return atomic.SwapUint32(&u.value, v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapUint32.
//
//go:nosplit
func (u *Uint32) CompareAndSwap(oldVal, newVal uint32) bool {
	return atomic.CompareAndSwapUint32(&u.value, oldVal, newVal)
}

//go:nosplit
func (u *Uint32) ptr() *uint32 {
	return &u.value
}

// Bool is an atomic Boolean.
//
// It is implemented by a Uint32, with value 0 indicating false, and 1
// indicating true.
//
// +stateify savable
type Bool struct {
	Uint32
}

// b32 returns a uint32 0 or 1 representing b.
func b32(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}

// FromBool returns a Bool initialized to value val.
//
//go:nosplit
func FromBool(val bool) Bool {
	return Bool{
		Uint32: FromUint32(b32(val)),
	}
}

// Load is analogous to atomic.LoadBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Load() bool {
	return b.Uint32.Load() != 0
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyLoad() bool {
	return b.Uint32.RacyLoad() != 0
}

// Store is analogous to atomic.StoreBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Store(val bool) {
	b.Uint32.Store(b32(val))
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyStore(val bool) {
	b.Uint32.RacyStore(b32(val))
}

// Swap is analogous to atomic.SwapBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Swap(val bool) bool {
	return b.Uint32.Swap(b32(val)) != 0
}

// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
// existed.
//
//go:nosplit
func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
	return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
}

// LINT.ThenChange(32b_64bit.go)
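The API above deliberately mirrors sync/atomic while attaching the operations to copy-protected value types. A minimal usage sketch (editorial illustration only, not part of the vendored code; it assumes the gvisor.dev/gvisor module is importable):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/atomicbitops"
)

func main() {
	// All mutations below go through the atomic method set; the embedded
	// sync.NoCopy field discourages accidental copies after first use.
	c := atomicbitops.FromInt32(10)
	c.Add(5)                            // c is now 15.
	swapped := c.CompareAndSwap(15, 20) // true; c is now 20.

	// Bool is a Uint32 underneath: 0 means false, 1 means true.
	ready := atomicbitops.FromBool(false)
	ready.Store(true)

	fmt.Println(c.Load(), swapped, ready.Load()) // 20 true true
}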
289  vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go  vendored  Normal file
@@ -0,0 +1,289 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !arm && !mips && !mipsle && !386
// +build !arm,!mips,!mipsle,!386

package atomicbitops

import (
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/sync"
)

// Note that this file is *identical* to 32b_32bit.go, as go_stateify gets
// confused about build tags if these are not separated.

// LINT.IfChange

// Int32 is an atomic int32.
//
// The default value is zero.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// +stateify savable
type Int32 struct {
	_     sync.NoCopy
	value int32
}

// FromInt32 returns an Int32 initialized to value v.
//
//go:nosplit
func FromInt32(v int32) Int32 {
	return Int32{value: v}
}

// Load is analogous to atomic.LoadInt32.
//
//go:nosplit
func (i *Int32) Load() int32 {
	return atomic.LoadInt32(&i.value)
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int32) RacyLoad() int32 {
	return i.value
}

// Store is analogous to atomic.StoreInt32.
//
//go:nosplit
func (i *Int32) Store(v int32) {
	atomic.StoreInt32(&i.value, v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int32) RacyStore(v int32) {
	i.value = v
}

// Add is analogous to atomic.AddInt32.
//
//go:nosplit
func (i *Int32) Add(v int32) int32 {
	return atomic.AddInt32(&i.value, v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int32) RacyAdd(v int32) int32 {
	i.value += v
	return i.value
}

// Swap is analogous to atomic.SwapInt32.
//
//go:nosplit
func (i *Int32) Swap(v int32) int32 {
	return atomic.SwapInt32(&i.value, v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapInt32.
//
//go:nosplit
func (i *Int32) CompareAndSwap(oldVal, newVal int32) bool {
	return atomic.CompareAndSwapInt32(&i.value, oldVal, newVal)
}

//go:nosplit
func (i *Int32) ptr() *int32 {
	return &i.value
}

// Uint32 is an atomic uint32.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_unsafe.go in this directory for justification.
//
// +stateify savable
type Uint32 struct {
	_     sync.NoCopy
	value uint32
}

// FromUint32 returns an Uint32 initialized to value v.
//
//go:nosplit
func FromUint32(v uint32) Uint32 {
	return Uint32{value: v}
}

// Load is analogous to atomic.LoadUint32.
//
//go:nosplit
func (u *Uint32) Load() uint32 {
	return atomic.LoadUint32(&u.value)
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint32) RacyLoad() uint32 {
	return u.value
}

// Store is analogous to atomic.StoreUint32.
//
//go:nosplit
func (u *Uint32) Store(v uint32) {
	atomic.StoreUint32(&u.value, v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint32) RacyStore(v uint32) {
	u.value = v
}

// Add is analogous to atomic.AddUint32.
//
//go:nosplit
func (u *Uint32) Add(v uint32) uint32 {
	return atomic.AddUint32(&u.value, v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint32) RacyAdd(v uint32) uint32 {
	u.value += v
	return u.value
}

// Swap is analogous to atomic.SwapUint32.
//
//go:nosplit
func (u *Uint32) Swap(v uint32) uint32 {
	return atomic.SwapUint32(&u.value, v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapUint32.
//
//go:nosplit
func (u *Uint32) CompareAndSwap(oldVal, newVal uint32) bool {
	return atomic.CompareAndSwapUint32(&u.value, oldVal, newVal)
}

//go:nosplit
func (u *Uint32) ptr() *uint32 {
	return &u.value
}

// Bool is an atomic Boolean.
//
// It is implemented by a Uint32, with value 0 indicating false, and 1
// indicating true.
//
// +stateify savable
type Bool struct {
	Uint32
}

// b32 returns a uint32 0 or 1 representing b.
func b32(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}

// FromBool returns a Bool initialized to value val.
//
//go:nosplit
func FromBool(val bool) Bool {
	return Bool{
		Uint32: FromUint32(b32(val)),
	}
}

// Load is analogous to atomic.LoadBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Load() bool {
	return b.Uint32.Load() != 0
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyLoad() bool {
	return b.Uint32.RacyLoad() != 0
}

// Store is analogous to atomic.StoreBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Store(val bool) {
	b.Uint32.Store(b32(val))
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyStore(val bool) {
	b.Uint32.RacyStore(b32(val))
}

// Swap is analogous to atomic.SwapBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Swap(val bool) bool {
	return b.Uint32.Swap(b32(val)) != 0
}

// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
// existed.
//
//go:nosplit
func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
	return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
}

// LINT.ThenChange(32b_32bit.go)
231  vendor/gvisor.dev/gvisor/pkg/atomicbitops/aligned_32bit_unsafe.go  vendored  Normal file
@@ -0,0 +1,231 @@
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm || mips || mipsle || 386
// +build arm mips mipsle 386

package atomicbitops

import (
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/sync"
)

// Int64 is an atomic int64 that is guaranteed to be 64-bit
// aligned, even on 32-bit systems.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// Per https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
//
// "On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
// for 64-bit alignment of 64-bit words accessed atomically. The first word in
// a variable or in an allocated struct, array, or slice can be relied upon to
// be 64-bit aligned."
//
// +stateify savable
type Int64 struct {
	_       sync.NoCopy
	value   int64
	value32 int32
}

//go:nosplit
func (i *Int64) ptr() *int64 {
	// On 32-bit systems, i.value is only guaranteed to be 32-bit aligned, but
	// the 12 bytes spanned by value and value32 are guaranteed to contain 8
	// contiguous bytes with 64-bit alignment.
	return (*int64)(unsafe.Pointer((uintptr(unsafe.Pointer(&i.value)) + 4) &^ 7))
}

// FromInt64 returns an Int64 initialized to value v.
//
//go:nosplit
func FromInt64(v int64) Int64 {
	var i Int64
	*i.ptr() = v
	return i
}

// Load is analogous to atomic.LoadInt64.
//
//go:nosplit
func (i *Int64) Load() int64 {
	return atomic.LoadInt64(i.ptr())
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int64) RacyLoad() int64 {
	return *i.ptr()
}

// Store is analogous to atomic.StoreInt64.
//
//go:nosplit
func (i *Int64) Store(v int64) {
	atomic.StoreInt64(i.ptr(), v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int64) RacyStore(v int64) {
	*i.ptr() = v
}

// Add is analogous to atomic.AddInt64.
//
//go:nosplit
func (i *Int64) Add(v int64) int64 {
	return atomic.AddInt64(i.ptr(), v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int64) RacyAdd(v int64) int64 {
	*i.ptr() += v
	return *i.ptr()
}

// Swap is analogous to atomic.SwapInt64.
//
//go:nosplit
func (i *Int64) Swap(v int64) int64 {
	return atomic.SwapInt64(i.ptr(), v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapInt64.
//
//go:nosplit
func (i *Int64) CompareAndSwap(oldVal, newVal int64) bool {
	// Use the aligned pointer here as well; &i.value itself may be only
	// 4-byte aligned on these platforms.
	return atomic.CompareAndSwapInt64(i.ptr(), oldVal, newVal)
}

// Uint64 is an atomic uint64 that is guaranteed to be 64-bit
// aligned, even on 32-bit systems.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// Per https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
//
// "On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
// for 64-bit alignment of 64-bit words accessed atomically. The first word in
// a variable or in an allocated struct, array, or slice can be relied upon to
// be 64-bit aligned."
//
// +stateify savable
type Uint64 struct {
	_       sync.NoCopy
	value   uint64
	value32 uint32
}

//go:nosplit
func (u *Uint64) ptr() *uint64 {
	// On 32-bit systems, u.value is only guaranteed to be 32-bit aligned, but
	// the 12 bytes spanned by value and value32 are guaranteed to contain 8
	// contiguous bytes with 64-bit alignment.
	return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&u.value)) + 4) &^ 7))
}

// FromUint64 returns an Uint64 initialized to value v.
//
//go:nosplit
func FromUint64(v uint64) Uint64 {
	var u Uint64
	*u.ptr() = v
	return u
}

// Load is analogous to atomic.LoadUint64.
//
//go:nosplit
func (u *Uint64) Load() uint64 {
	return atomic.LoadUint64(u.ptr())
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint64) RacyLoad() uint64 {
	return *u.ptr()
}

// Store is analogous to atomic.StoreUint64.
//
//go:nosplit
func (u *Uint64) Store(v uint64) {
	atomic.StoreUint64(u.ptr(), v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint64) RacyStore(v uint64) {
	*u.ptr() = v
}

// Add is analogous to atomic.AddUint64.
//
//go:nosplit
func (u *Uint64) Add(v uint64) uint64 {
	return atomic.AddUint64(u.ptr(), v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint64) RacyAdd(v uint64) uint64 {
	*u.ptr() += v
	return *u.ptr()
}

// Swap is analogous to atomic.SwapUint64.
//
//go:nosplit
func (u *Uint64) Swap(v uint64) uint64 {
	return atomic.SwapUint64(u.ptr(), v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapUint64.
//
//go:nosplit
func (u *Uint64) CompareAndSwap(oldVal, newVal uint64) bool {
	return atomic.CompareAndSwapUint64(u.ptr(), oldVal, newVal)
}
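The ptr() trick above works because value is at least 4-byte aligned, so the 12 bytes covered by value and value32 always contain an 8-byte window whose address is a multiple of 8; (addr + 4) &^ 7 selects that window. A standalone sketch of the arithmetic (editorial illustration with hypothetical addresses, not part of the vendored code):

package main

import "fmt"

func main() {
	// A 4-byte-aligned address is either already 8-aligned or sits 4 bytes
	// past an 8-aligned boundary; adding 4 and clearing the low 3 bits
	// lands on the 8-aligned 8-byte window inside the 12-byte struct.
	for _, addr := range []uintptr{0x1000, 0x1004} {
		aligned := (addr + 4) &^ 7
		fmt.Printf("value at %#x -> atomics operate on %#x\n", addr, aligned)
	}
	// Output:
	// value at 0x1000 -> atomics operate on 0x1000
	// value at 0x1004 -> atomics operate on 0x1008
}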
212  vendor/gvisor.dev/gvisor/pkg/atomicbitops/aligned_64bit.go  vendored  Normal file
@@ -0,0 +1,212 @@
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !arm && !mips && !mipsle && !386
// +build !arm,!mips,!mipsle,!386

package atomicbitops

import (
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/sync"
)

// Int64 is an atomic int64 that is guaranteed to be 64-bit
// aligned, even on 32-bit systems. On most architectures, it's just a regular
// int64.
//
// The default value is zero.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_32bit_unsafe.go in this directory for justification.
//
// +stateify savable
type Int64 struct {
	_     sync.NoCopy
	value int64
}

// FromInt64 returns an Int64 initialized to value v.
//
//go:nosplit
func FromInt64(v int64) Int64 {
	return Int64{value: v}
}

// Load is analogous to atomic.LoadInt64.
//
//go:nosplit
func (i *Int64) Load() int64 {
	return atomic.LoadInt64(&i.value)
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int64) RacyLoad() int64 {
	return i.value
}

// Store is analogous to atomic.StoreInt64.
//
//go:nosplit
func (i *Int64) Store(v int64) {
	atomic.StoreInt64(&i.value, v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int64) RacyStore(v int64) {
	i.value = v
}

// Add is analogous to atomic.AddInt64.
//
//go:nosplit
func (i *Int64) Add(v int64) int64 {
	return atomic.AddInt64(&i.value, v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (i *Int64) RacyAdd(v int64) int64 {
	i.value += v
	return i.value
}

// Swap is analogous to atomic.SwapInt64.
//
//go:nosplit
func (i *Int64) Swap(v int64) int64 {
	return atomic.SwapInt64(&i.value, v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapInt64.
//
//go:nosplit
func (i *Int64) CompareAndSwap(oldVal, newVal int64) bool {
	return atomic.CompareAndSwapInt64(&i.value, oldVal, newVal)
}

//go:nosplit
func (i *Int64) ptr() *int64 {
	return &i.value
}

// Uint64 is an atomic uint64 that is guaranteed to be 64-bit
// aligned, even on 32-bit systems. On most architectures, it's just a regular
// uint64.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_unsafe.go in this directory for justification.
//
// +stateify savable
type Uint64 struct {
	_     sync.NoCopy
	value uint64
}

// FromUint64 returns an Uint64 initialized to value v.
//
//go:nosplit
func FromUint64(v uint64) Uint64 {
	return Uint64{value: v}
}

// Load is analogous to atomic.LoadUint64.
//
//go:nosplit
func (u *Uint64) Load() uint64 {
	return atomic.LoadUint64(&u.value)
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint64) RacyLoad() uint64 {
	return u.value
}

// Store is analogous to atomic.StoreUint64.
//
//go:nosplit
func (u *Uint64) Store(v uint64) {
	atomic.StoreUint64(&u.value, v)
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint64) RacyStore(v uint64) {
	u.value = v
}

// Add is analogous to atomic.AddUint64.
//
//go:nosplit
func (u *Uint64) Add(v uint64) uint64 {
	return atomic.AddUint64(&u.value, v)
}

// RacyAdd is analogous to adding to an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (u *Uint64) RacyAdd(v uint64) uint64 {
	u.value += v
	return u.value
}

// Swap is analogous to atomic.SwapUint64.
//
//go:nosplit
func (u *Uint64) Swap(v uint64) uint64 {
	return atomic.SwapUint64(&u.value, v)
}

// CompareAndSwap is analogous to atomic.CompareAndSwapUint64.
//
//go:nosplit
func (u *Uint64) CompareAndSwap(oldVal, newVal uint64) bool {
	return atomic.CompareAndSwapUint64(&u.value, oldVal, newVal)
}

//go:nosplit
func (u *Uint64) ptr() *uint64 {
	return &u.value
}
82  vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops.go  vendored  Normal file
@@ -0,0 +1,82 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64 || arm64
// +build amd64 arm64

// Package atomicbitops provides extensions to the sync/atomic package.
//
// All read-modify-write operations implemented by this package have
// acquire-release memory ordering (like sync/atomic).
//
// +checkalignedignore
package atomicbitops

// AndUint32 atomically applies bitwise AND operation to *addr with val.
func AndUint32(addr *Uint32, val uint32) {
	andUint32(&addr.value, val)
}

func andUint32(addr *uint32, val uint32)

// OrUint32 atomically applies bitwise OR operation to *addr with val.
func OrUint32(addr *Uint32, val uint32) {
	orUint32(&addr.value, val)
}

func orUint32(addr *uint32, val uint32)

// XorUint32 atomically applies bitwise XOR operation to *addr with val.
func XorUint32(addr *Uint32, val uint32) {
	xorUint32(&addr.value, val)
}

func xorUint32(addr *uint32, val uint32)

// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
// the value previously stored at addr.
func CompareAndSwapUint32(addr *Uint32, old, new uint32) uint32 {
	return compareAndSwapUint32(&addr.value, old, new)
}

func compareAndSwapUint32(addr *uint32, old, new uint32) uint32

// AndUint64 atomically applies bitwise AND operation to *addr with val.
func AndUint64(addr *Uint64, val uint64) {
	andUint64(&addr.value, val)
}

func andUint64(addr *uint64, val uint64)

// OrUint64 atomically applies bitwise OR operation to *addr with val.
func OrUint64(addr *Uint64, val uint64) {
	orUint64(&addr.value, val)
}

func orUint64(addr *uint64, val uint64)

// XorUint64 atomically applies bitwise XOR operation to *addr with val.
func XorUint64(addr *Uint64, val uint64) {
	xorUint64(&addr.value, val)
}

func xorUint64(addr *uint64, val uint64)

// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
// the value previously stored at addr.
func CompareAndSwapUint64(addr *Uint64, old, new uint64) uint64 {
	return compareAndSwapUint64(&addr.value, old, new)
}

func compareAndSwapUint64(addr *uint64, old, new uint64) uint64
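Two details distinguish these helpers from sync/atomic: the bitwise operations compile down to single locked instructions (see the .s files below), and CompareAndSwapUint32/CompareAndSwapUint64 return the previous value rather than a bool, so a failed CAS reports what value won the race. A usage sketch (editorial illustration only; requires an amd64 or arm64 build, the only targets this file covers):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/atomicbitops"
)

func main() {
	flags := atomicbitops.FromUint32(0b1010)

	atomicbitops.OrUint32(&flags, 0b0001)  // set bit 0    -> 0b1011
	atomicbitops.AndUint32(&flags, 0b0111) // clear bit 3  -> 0b0011
	atomicbitops.XorUint32(&flags, 0b0010) // toggle bit 1 -> 0b0001

	// CAS succeeds here and returns the previous value, 0b0001.
	prev := atomicbitops.CompareAndSwapUint32(&flags, 0b0001, 0b1111)
	fmt.Printf("prev=%b now=%b\n", prev, flags.Load()) // prev=1 now=1111
}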
93  vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go  vendored  Normal file
@@ -0,0 +1,93 @@
// automatically generated by stateify.

//go:build arm || mips || mipsle || 386
// +build arm mips mipsle 386

package atomicbitops

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (i *Int32) StateTypeName() string {
	return "pkg/atomicbitops.Int32"
}

func (i *Int32) StateFields() []string {
	return []string{
		"value",
	}
}

func (i *Int32) beforeSave() {}

// +checklocksignore
func (i *Int32) StateSave(stateSinkObject state.Sink) {
	i.beforeSave()
	stateSinkObject.Save(0, &i.value)
}

func (i *Int32) afterLoad(context.Context) {}

// +checklocksignore
func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &i.value)
}

func (u *Uint32) StateTypeName() string {
	return "pkg/atomicbitops.Uint32"
}

func (u *Uint32) StateFields() []string {
	return []string{
		"value",
	}
}

func (u *Uint32) beforeSave() {}

// +checklocksignore
func (u *Uint32) StateSave(stateSinkObject state.Sink) {
	u.beforeSave()
	stateSinkObject.Save(0, &u.value)
}

func (u *Uint32) afterLoad(context.Context) {}

// +checklocksignore
func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &u.value)
}

func (b *Bool) StateTypeName() string {
	return "pkg/atomicbitops.Bool"
}

func (b *Bool) StateFields() []string {
	return []string{
		"Uint32",
	}
}

func (b *Bool) beforeSave() {}

// +checklocksignore
func (b *Bool) StateSave(stateSinkObject state.Sink) {
	b.beforeSave()
	stateSinkObject.Save(0, &b.Uint32)
}

func (b *Bool) afterLoad(context.Context) {}

// +checklocksignore
func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &b.Uint32)
}

func init() {
	state.Register((*Int32)(nil))
	state.Register((*Uint32)(nil))
	state.Register((*Bool)(nil))
}
73  vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go  vendored  Normal file
@@ -0,0 +1,73 @@
// automatically generated by stateify.

//go:build arm || mips || mipsle || 386
// +build arm mips mipsle 386

package atomicbitops

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (i *Int64) StateTypeName() string {
	return "pkg/atomicbitops.Int64"
}

func (i *Int64) StateFields() []string {
	return []string{
		"value",
		"value32",
	}
}

func (i *Int64) beforeSave() {}

// +checklocksignore
func (i *Int64) StateSave(stateSinkObject state.Sink) {
	i.beforeSave()
	stateSinkObject.Save(0, &i.value)
	stateSinkObject.Save(1, &i.value32)
}

func (i *Int64) afterLoad(context.Context) {}

// +checklocksignore
func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &i.value)
	stateSourceObject.Load(1, &i.value32)
}

func (u *Uint64) StateTypeName() string {
	return "pkg/atomicbitops.Uint64"
}

func (u *Uint64) StateFields() []string {
	return []string{
		"value",
		"value32",
	}
}

func (u *Uint64) beforeSave() {}

// +checklocksignore
func (u *Uint64) StateSave(stateSinkObject state.Sink) {
	u.beforeSave()
	stateSinkObject.Save(0, &u.value)
	stateSinkObject.Save(1, &u.value32)
}

func (u *Uint64) afterLoad(context.Context) {}

// +checklocksignore
func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &u.value)
	stateSourceObject.Load(1, &u.value32)
}

func init() {
	state.Register((*Int64)(nil))
	state.Register((*Uint64)(nil))
}
145  vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go  vendored  Normal file
@@ -0,0 +1,145 @@
|
||||
// automatically generated by stateify.
|
||||
|
||||
//go:build !arm && !mips && !mipsle && !386 && !arm && !mips && !mipsle && !386
|
||||
// +build !arm,!mips,!mipsle,!386,!arm,!mips,!mipsle,!386
|
||||
|
||||
package atomicbitops
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/state"
)

func (i *Int32) StateTypeName() string {
	return "pkg/atomicbitops.Int32"
}

func (i *Int32) StateFields() []string {
	return []string{
		"value",
	}
}

func (i *Int32) beforeSave() {}

// +checklocksignore
func (i *Int32) StateSave(stateSinkObject state.Sink) {
	i.beforeSave()
	stateSinkObject.Save(0, &i.value)
}

func (i *Int32) afterLoad(context.Context) {}

// +checklocksignore
func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &i.value)
}

func (u *Uint32) StateTypeName() string {
	return "pkg/atomicbitops.Uint32"
}

func (u *Uint32) StateFields() []string {
	return []string{
		"value",
	}
}

func (u *Uint32) beforeSave() {}

// +checklocksignore
func (u *Uint32) StateSave(stateSinkObject state.Sink) {
	u.beforeSave()
	stateSinkObject.Save(0, &u.value)
}

func (u *Uint32) afterLoad(context.Context) {}

// +checklocksignore
func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &u.value)
}

func (b *Bool) StateTypeName() string {
	return "pkg/atomicbitops.Bool"
}

func (b *Bool) StateFields() []string {
	return []string{
		"Uint32",
	}
}

func (b *Bool) beforeSave() {}

// +checklocksignore
func (b *Bool) StateSave(stateSinkObject state.Sink) {
	b.beforeSave()
	stateSinkObject.Save(0, &b.Uint32)
}

func (b *Bool) afterLoad(context.Context) {}

// +checklocksignore
func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &b.Uint32)
}

func (i *Int64) StateTypeName() string {
	return "pkg/atomicbitops.Int64"
}

func (i *Int64) StateFields() []string {
	return []string{
		"value",
	}
}

func (i *Int64) beforeSave() {}

// +checklocksignore
func (i *Int64) StateSave(stateSinkObject state.Sink) {
	i.beforeSave()
	stateSinkObject.Save(0, &i.value)
}

func (i *Int64) afterLoad(context.Context) {}

// +checklocksignore
func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &i.value)
}

func (u *Uint64) StateTypeName() string {
	return "pkg/atomicbitops.Uint64"
}

func (u *Uint64) StateFields() []string {
	return []string{
		"value",
	}
}

func (u *Uint64) beforeSave() {}

// +checklocksignore
func (u *Uint64) StateSave(stateSinkObject state.Sink) {
	u.beforeSave()
	stateSinkObject.Save(0, &u.value)
}

func (u *Uint64) afterLoad(context.Context) {}

// +checklocksignore
func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &u.value)
}

func init() {
	state.Register((*Int32)(nil))
	state.Register((*Uint32)(nil))
	state.Register((*Bool)(nil))
	state.Register((*Int64)(nil))
	state.Register((*Uint64)(nil))
}
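The methods above are stateify's generated save/restore hooks: a savable type names itself, lists its fields, and serializes them by stable slot index. As a rough illustration of the same contract, here is a hand-written sketch; the counter type is hypothetical and not part of the vendored package, and the state.Sink/state.Source calls simply mirror the generated code above:

package example

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

// counter is a hypothetical savable type following the stateify contract.
type counter struct {
	hits int64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

// StateSave writes each field under a stable slot index.
func (c *counter) StateSave(sink state.Sink) {
	sink.Save(0, &c.hits)
}

// StateLoad reads fields back by the same indices.
func (c *counter) StateLoad(ctx context.Context, source state.Source) {
	source.Load(0, &c.hits)
}

func init() {
	// Registration makes the type discoverable during restore.
	state.Register((*counter)(nil))
}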
77
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s
vendored
Normal file
@@ -0,0 +1,77 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build amd64

#include "textflag.h"

TEXT ·andUint32(SB),NOSPLIT|NOFRAME,$0-12
	MOVQ addr+0(FP), BX
	MOVL val+8(FP), AX
	LOCK
	ANDL AX, 0(BX)
	RET

TEXT ·orUint32(SB),NOSPLIT|NOFRAME,$0-12
	MOVQ addr+0(FP), BX
	MOVL val+8(FP), AX
	LOCK
	ORL AX, 0(BX)
	RET

TEXT ·xorUint32(SB),NOSPLIT|NOFRAME,$0-12
	MOVQ addr+0(FP), BX
	MOVL val+8(FP), AX
	LOCK
	XORL AX, 0(BX)
	RET

TEXT ·compareAndSwapUint32(SB),NOSPLIT|NOFRAME,$0-20
	MOVQ addr+0(FP), DI
	MOVL old+8(FP), AX
	MOVL new+12(FP), DX
	LOCK
	CMPXCHGL DX, 0(DI)
	MOVL AX, ret+16(FP)
	RET

TEXT ·andUint64(SB),NOSPLIT|NOFRAME,$0-16
	MOVQ addr+0(FP), BX
	MOVQ val+8(FP), AX
	LOCK
	ANDQ AX, 0(BX)
	RET

TEXT ·orUint64(SB),NOSPLIT|NOFRAME,$0-16
	MOVQ addr+0(FP), BX
	MOVQ val+8(FP), AX
	LOCK
	ORQ AX, 0(BX)
	RET

TEXT ·xorUint64(SB),NOSPLIT|NOFRAME,$0-16
	MOVQ addr+0(FP), BX
	MOVQ val+8(FP), AX
	LOCK
	XORQ AX, 0(BX)
	RET

TEXT ·compareAndSwapUint64(SB),NOSPLIT|NOFRAME,$0-32
	MOVQ addr+0(FP), DI
	MOVQ old+8(FP), AX
	MOVQ new+16(FP), DX
	LOCK
	CMPXCHGQ DX, 0(DI)
	MOVQ AX, ret+24(FP)
	RET
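These LOCK-prefixed stubs back the package's exported AndUint32/OrUint32/XorUint32 and compare-and-swap helpers (their Go signatures are visible in atomicbitops_noasm.go further down). A hedged usage sketch, assuming the package's FromUint32 constructor:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/atomicbitops"
)

func main() {
	// FromUint32 constructs an atomic value; AndUint32/OrUint32 mutate it
	// with a single LOCK-prefixed instruction on amd64.
	flags := atomicbitops.FromUint32(0b1111)
	atomicbitops.AndUint32(&flags, 0b1100) // clear the low two bits
	atomicbitops.OrUint32(&flags, 0b0001)  // set bit 0 again
	fmt.Printf("%04b\n", flags.Load())     // prints 1101
}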
40
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.go
vendored
Normal file
@@ -0,0 +1,40 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm64
// +build arm64

package atomicbitops

import (
	"runtime"

	"golang.org/x/sys/cpu"
	"gvisor.dev/gvisor/pkg/cpuid"
)

var arm64HasATOMICS bool

func init() {
	// The gvisor cpuid package only works on Linux.
	// For all other operating systems, use Go's x/sys/cpu package
	// to get the one bit we care about here.
	//
	// See https://github.com/google/gvisor/issues/7849.
	if runtime.GOOS == "linux" {
		arm64HasATOMICS = cpuid.HostFeatureSet().HasFeature(cpuid.ARM64FeatureATOMICS)
	} else {
		arm64HasATOMICS = cpu.ARM64.HasATOMICS
	}
}
141
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s
vendored
Normal file
@@ -0,0 +1,141 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build arm64

#include "textflag.h"

TEXT ·andUint32(SB),NOSPLIT,$0-12
	MOVD addr+0(FP), R0
	MOVW val+8(FP), R1
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	MVN R1, R2
	LDCLRALW R2, (R0), R3
	RET
load_store_loop:
	LDAXRW (R0), R2
	ANDW R1, R2
	STLXRW R2, (R0), R3
	CBNZ R3, load_store_loop
	RET

TEXT ·orUint32(SB),NOSPLIT,$0-12
	MOVD addr+0(FP), R0
	MOVW val+8(FP), R1
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	LDORALW R1, (R0), R2
	RET
load_store_loop:
	LDAXRW (R0), R2
	ORRW R1, R2
	STLXRW R2, (R0), R3
	CBNZ R3, load_store_loop
	RET

TEXT ·xorUint32(SB),NOSPLIT,$0-12
	MOVD addr+0(FP), R0
	MOVW val+8(FP), R1
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	LDEORALW R1, (R0), R2
	RET
load_store_loop:
	LDAXRW (R0), R2
	EORW R1, R2
	STLXRW R2, (R0), R3
	CBNZ R3, load_store_loop
	RET

TEXT ·compareAndSwapUint32(SB),NOSPLIT,$0-20
	MOVD addr+0(FP), R0
	MOVW old+8(FP), R1
	MOVW new+12(FP), R2
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	CASALW R1, (R0), R2
	MOVW R1, ret+16(FP)
	RET
load_store_loop:
	LDAXRW (R0), R3
	CMPW R1, R3
	BNE ok
	STLXRW R2, (R0), R4
	CBNZ R4, load_store_loop
ok:
	MOVW R3, ret+16(FP)
	RET

TEXT ·andUint64(SB),NOSPLIT,$0-16
	MOVD addr+0(FP), R0
	MOVD val+8(FP), R1
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	MVN R1, R2
	LDCLRALD R2, (R0), R3
	RET
load_store_loop:
	LDAXR (R0), R2
	AND R1, R2
	STLXR R2, (R0), R3
	CBNZ R3, load_store_loop
	RET

TEXT ·orUint64(SB),NOSPLIT,$0-16
	MOVD addr+0(FP), R0
	MOVD val+8(FP), R1
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	LDORALD R1, (R0), R2
	RET
load_store_loop:
	LDAXR (R0), R2
	ORR R1, R2
	STLXR R2, (R0), R3
	CBNZ R3, load_store_loop
	RET

TEXT ·xorUint64(SB),NOSPLIT,$0-16
	MOVD addr+0(FP), R0
	MOVD val+8(FP), R1
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	LDEORALD R1, (R0), R2
	RET
load_store_loop:
	LDAXR (R0), R2
	EOR R1, R2
	STLXR R2, (R0), R3
	CBNZ R3, load_store_loop
	RET

TEXT ·compareAndSwapUint64(SB),NOSPLIT,$0-32
	MOVD addr+0(FP), R0
	MOVD old+8(FP), R1
	MOVD new+16(FP), R2
	MOVBU ·arm64HasATOMICS(SB), R4
	CBZ R4, load_store_loop
	CASALD R1, (R0), R2
	MOVD R1, ret+24(FP)
	RET
load_store_loop:
	LDAXR (R0), R3
	CMP R1, R3
	BNE ok
	STLXR R2, (R0), R4
	CBNZ R4, load_store_loop
ok:
	MOVD R3, ret+24(FP)
	RET
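Each routine above branches on arm64HasATOMICS: with ARMv8.1 LSE it issues a single LDCLRAL/LDORAL/LDEORAL/CASAL instruction, otherwise it falls back to the load-exclusive/store-exclusive retry loop. The fallback computes the same result as this portable Go sketch (illustrative only, not part of the package):

package example

import "sync/atomic"

// orUint32LLSC mirrors the load_store_loop path of ·orUint32 above:
// load the current value (LDAXRW), OR in the bits (ORRW), and retry
// until the exclusive store (STLXRW) succeeds.
func orUint32LLSC(addr *uint32, val uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if atomic.CompareAndSwapUint32(addr, old, old|val) {
			return
		}
	}
}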
6
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64_state_autogen.go
vendored
Normal file
@@ -0,0 +1,6 @@
// automatically generated by stateify.

//go:build arm64
// +build arm64

package atomicbitops
105
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_float64.go
vendored
Normal file
@@ -0,0 +1,105 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package atomicbitops

import (
	"math"

	"gvisor.dev/gvisor/pkg/sync"
)

// Float64 is an atomic 64-bit floating-point number.
//
// +stateify savable
type Float64 struct {
	_ sync.NoCopy
	// bits stores the bits of a 64-bit floating point number.
	// It is not (and should not be interpreted as) a real uint64.
	bits Uint64
}

// FromFloat64 returns a Float64 initialized to value v.
//
//go:nosplit
func FromFloat64(v float64) Float64 {
	return Float64{bits: FromUint64(math.Float64bits(v))}
}

// Load loads the floating-point value.
//
//go:nosplit
func (f *Float64) Load() float64 {
	return math.Float64frombits(f.bits.Load())
}

// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (f *Float64) RacyLoad() float64 {
	return math.Float64frombits(f.bits.RacyLoad())
}

// Store stores the given floating-point value in the Float64.
//
//go:nosplit
func (f *Float64) Store(v float64) {
	f.bits.Store(math.Float64bits(v))
}

// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (f *Float64) RacyStore(v float64) {
	f.bits.RacyStore(math.Float64bits(v))
}

// Swap stores the given value and returns the previously-stored one.
//
//go:nosplit
func (f *Float64) Swap(v float64) float64 {
	return math.Float64frombits(f.bits.Swap(math.Float64bits(v)))
}

// CompareAndSwap does a compare-and-swap operation on the float64 value.
// Note that unlike typical IEEE 754 semantics, this function will treat NaN
// as equal to itself if all of its bits exactly match.
//
//go:nosplit
func (f *Float64) CompareAndSwap(oldVal, newVal float64) bool {
	return f.bits.CompareAndSwap(math.Float64bits(oldVal), math.Float64bits(newVal))
}

// Add increments the float by the given value.
// Note that unlike an atomic integer, this requires spin-looping until we win
// the compare-and-swap race, so this may take an indeterminate amount of time.
//
//go:nosplit
func (f *Float64) Add(v float64) {
	// We do a racy load here because we optimistically think it may pass the
	// compare-and-swap operation. If it doesn't, we'll load it safely, so this
	// is OK and not a race for the overall intent of the user to add a number.
	sync.RaceDisable()
	oldVal := f.RacyLoad()
	for !f.CompareAndSwap(oldVal, oldVal+v) {
		oldVal = f.Load()
	}
	sync.RaceEnable()
}
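Float64 keeps the float's bit pattern in an atomic Uint64, so Load/Store/Swap/CompareAndSwap reduce to integer atomics and only Add needs the CAS retry loop. A small hedged usage sketch:

package main

import (
	"fmt"
	"sync"

	"gvisor.dev/gvisor/pkg/atomicbitops"
)

func main() {
	total := atomicbitops.FromFloat64(0)

	// Concurrent Add calls contend on the CAS inside Add, never on the bits.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				total.Add(0.5)
			}
		}()
	}
	wg.Wait()
	fmt.Println(total.Load()) // 2000 exactly, since 0.5 is a power of two
}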
112
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_noasm.go
vendored
Normal file
@@ -0,0 +1,112 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !amd64 && !arm64
// +build !amd64,!arm64

package atomicbitops

import "sync/atomic"

// AndUint32 atomically applies bitwise AND operation to *addr with val.
//
//go:nosplit
func AndUint32(addr *Uint32, val uint32) {
	for {
		o := addr.Load()
		n := o & val
		if atomic.CompareAndSwapUint32(&addr.value, o, n) {
			break
		}
	}
}

// OrUint32 atomically applies bitwise OR operation to *addr with val.
//
//go:nosplit
func OrUint32(addr *Uint32, val uint32) {
	for {
		o := addr.Load()
		n := o | val
		if atomic.CompareAndSwapUint32(&addr.value, o, n) {
			break
		}
	}
}

// XorUint32 atomically applies bitwise XOR operation to *addr with val.
//
//go:nosplit
func XorUint32(addr *Uint32, val uint32) {
	for {
		o := addr.Load()
		n := o ^ val
		if atomic.CompareAndSwapUint32(&addr.value, o, n) {
			break
		}
	}
}

// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
// the value previously stored at addr.
//
//go:nosplit
func CompareAndSwapUint32(addr *Uint32, old, new uint32) (prev uint32) {
	for {
		prev = addr.Load()
		if prev != old {
			return
		}
		if atomic.CompareAndSwapUint32(&addr.value, old, new) {
			return
		}
	}
}

// AndUint64 atomically applies bitwise AND operation to *addr with val.
//
//go:nosplit
func AndUint64(addr *Uint64, val uint64) {
	for {
		o := atomic.LoadUint64(addr.ptr())
		n := o & val
		if atomic.CompareAndSwapUint64(addr.ptr(), o, n) {
			break
		}
	}
}

// OrUint64 atomically applies bitwise OR operation to *addr with val.
//
//go:nosplit
func OrUint64(addr *Uint64, val uint64) {
	for {
		o := atomic.LoadUint64(addr.ptr())
		n := o | val
		if atomic.CompareAndSwapUint64(addr.ptr(), o, n) {
			break
		}
	}
}

// XorUint64 atomically applies bitwise XOR operation to *addr with val.
//
//go:nosplit
func XorUint64(addr *Uint64, val uint64) {
	for {
		o := atomic.LoadUint64(addr.ptr())
		n := o ^ val
		if atomic.CompareAndSwapUint64(addr.ptr(), o, n) {
			break
		}
	}
}

// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
// the value previously stored at addr.
//
//go:nosplit
func CompareAndSwapUint64(addr *Uint64, old, new uint64) (prev uint64) {
	for {
		prev = atomic.LoadUint64(addr.ptr())
		if prev != old {
			return
		}
		if atomic.CompareAndSwapUint64(addr.ptr(), old, new) {
			return
		}
	}
}
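On architectures without the assembly fast paths, every read-modify-write above is an optimistic CAS loop, and CompareAndSwapUint32/64 return the previous value rather than a bool. A hedged illustration of that return convention:

package example

import "gvisor.dev/gvisor/pkg/atomicbitops"

// incrementIfEqual bumps v from expect to expect+1 and reports whether it
// won the race, using the "returns previous value" convention above.
func incrementIfEqual(v *atomicbitops.Uint32, expect uint32) bool {
	prev := atomicbitops.CompareAndSwapUint32(v, expect, expect+1)
	// On success the previous value equals the expected one.
	return prev == expect
}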
43
vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go
vendored
Normal file
@@ -0,0 +1,43 @@
// automatically generated by stateify.

//go:build (amd64 || arm64) && !amd64 && !arm64
// +build amd64 arm64
// +build !amd64
// +build !arm64

package atomicbitops

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (f *Float64) StateTypeName() string {
	return "pkg/atomicbitops.Float64"
}

func (f *Float64) StateFields() []string {
	return []string{
		"bits",
	}
}

func (f *Float64) beforeSave() {}

// +checklocksignore
func (f *Float64) StateSave(stateSinkObject state.Sink) {
	f.beforeSave()
	stateSinkObject.Save(0, &f.bits)
}

func (f *Float64) afterLoad(context.Context) {}

// +checklocksignore
func (f *Float64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &f.bits)
}

func init() {
	state.Register((*Float64)(nil))
}
26
vendor/gvisor.dev/gvisor/pkg/bits/bits.go
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package bits includes all bit related types and operations.
package bits

// AlignUp rounds a length up to an alignment. align must be a power of 2.
func AlignUp(length int, align uint) int {
	return (length + int(align) - 1) & ^(int(align) - 1)
}

// AlignDown rounds a length down to an alignment. align must be a power of 2.
func AlignDown(length int, align uint) int {
	return length & ^(int(align) - 1)
}
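AlignUp adds align-1 and then clears the low bits with the mask ^(align-1), which is why align must be a power of two. A quick hedged check of the arithmetic:

package example

import "gvisor.dev/gvisor/pkg/bits"

func alignExamples() {
	_ = bits.AlignUp(10, 8)   // (10+7) &^ 7 == 16
	_ = bits.AlignUp(16, 8)   // already aligned, stays 16
	_ = bits.AlignDown(10, 8) // 10 &^ 7 == 8
}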
33
vendor/gvisor.dev/gvisor/pkg/bits/bits32.go
vendored
Normal file
@@ -0,0 +1,33 @@
package bits

// IsOn32 returns true if *all* bits set in 'bits' are set in 'mask'.
func IsOn32(mask, bits uint32) bool {
	return mask&bits == bits
}

// IsAnyOn32 returns true if *any* bit set in 'bits' is set in 'mask'.
func IsAnyOn32(mask, bits uint32) bool {
	return mask&bits != 0
}

// Mask32 returns a uint32 with all of the given bits set.
func Mask32(is ...int) uint32 {
	ret := uint32(0)
	for _, i := range is {
		ret |= MaskOf32(i)
	}
	return ret
}

// MaskOf32 is like Mask32, but sets only a single bit (more efficiently).
func MaskOf32(i int) uint32 {
	return uint32(1) << uint32(i)
}

// IsPowerOfTwo32 returns true if v is a power of 2.
func IsPowerOfTwo32(v uint32) bool {
	if v == 0 {
		return false
	}
	return v&(v-1) == 0
}
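Mask32 simply ORs together single-bit masks, so indices can be listed in any order; combined with IsOn32 it reads as a flag check. A small hedged example:

package example

import "gvisor.dev/gvisor/pkg/bits"

// hasAll reports whether word has bits 0 and 3 both set.
func hasAll(word uint32) bool {
	want := bits.Mask32(0, 3) // 0b1001
	return bits.IsOn32(word, want)
}

// hasAll(0b1011) == true; hasAll(0b0011) == false.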
33
vendor/gvisor.dev/gvisor/pkg/bits/bits64.go
vendored
Normal file
@@ -0,0 +1,33 @@
package bits

// IsOn64 returns true if *all* bits set in 'bits' are set in 'mask'.
func IsOn64(mask, bits uint64) bool {
	return mask&bits == bits
}

// IsAnyOn64 returns true if *any* bit set in 'bits' is set in 'mask'.
func IsAnyOn64(mask, bits uint64) bool {
	return mask&bits != 0
}

// Mask64 returns a uint64 with all of the given bits set.
func Mask64(is ...int) uint64 {
	ret := uint64(0)
	for _, i := range is {
		ret |= MaskOf64(i)
	}
	return ret
}

// MaskOf64 is like Mask64, but sets only a single bit (more efficiently).
func MaskOf64(i int) uint64 {
	return uint64(1) << uint64(i)
}

// IsPowerOfTwo64 returns true if v is a power of 2.
func IsPowerOfTwo64(v uint64) bool {
	if v == 0 {
		return false
	}
	return v&(v-1) == 0
}
8
vendor/gvisor.dev/gvisor/pkg/bits/bits_state_autogen.go
vendored
Normal file
@@ -0,0 +1,8 @@
// automatically generated by stateify.

//go:build (amd64 || arm64) && !amd64 && !arm64
// +build amd64 arm64
// +build !amd64
// +build !arm64

package bits
37
vendor/gvisor.dev/gvisor/pkg/bits/uint64_arch.go
vendored
Normal file
@@ -0,0 +1,37 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64 || arm64
// +build amd64 arm64

package bits

// TrailingZeros64 returns the number of bits before the least significant 1
// bit in x; in other words, it returns the index of the least significant 1
// bit in x. If x is 0, TrailingZeros64 returns 64.
func TrailingZeros64(x uint64) int

// MostSignificantOne64 returns the index of the most significant 1 bit in
// x. If x is 0, MostSignificantOne64 returns 64.
func MostSignificantOne64(x uint64) int

// ForEachSetBit64 calls f once for each set bit in x, with argument i equal to
// the set bit's index.
func ForEachSetBit64(x uint64, f func(i int)) {
	for x != 0 {
		i := TrailingZeros64(x)
		f(i)
		x &^= MaskOf64(i)
	}
}
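ForEachSetBit64 repeatedly takes the lowest set bit and clears it with x &^= MaskOf64(i), so f observes indices in ascending order. A hedged example:

package example

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/bits"
)

func listSetBits() {
	bits.ForEachSetBit64(0b101001, func(i int) {
		fmt.Println(i) // prints 0, then 3, then 5
	})
}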
32
vendor/gvisor.dev/gvisor/pkg/bits/uint64_arch_amd64_asm.s
vendored
Normal file
@@ -0,0 +1,32 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

TEXT ·TrailingZeros64(SB),$0-16
	BSFQ x+0(FP), AX
	JNZ end
	MOVQ $64, AX
end:
	MOVQ AX, ret+8(FP)
	RET

TEXT ·MostSignificantOne64(SB),$0-16
	BSRQ x+0(FP), AX
	JNZ end
	MOVQ $64, AX
end:
	MOVQ AX, ret+8(FP)
	RET
34
vendor/gvisor.dev/gvisor/pkg/bits/uint64_arch_arm64_asm.s
vendored
Normal file
@@ -0,0 +1,34 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm64
// +build arm64

TEXT ·TrailingZeros64(SB),$0-16
	MOVD x+0(FP), R0
	RBIT R0, R0
	CLZ R0, R0 // return 64 if x == 0
	MOVD R0, ret+8(FP)
	RET

TEXT ·MostSignificantOne64(SB),$0-16
	MOVD x+0(FP), R0
	CLZ R0, R0 // return 64 if x == 0
	MOVD $63, R1
	SUBS R0, R1, R0 // ret = 63 - CLZ
	BPL end
	MOVD $64, R0 // x == 0
end:
	MOVD R0, ret+8(FP)
	RET
56
vendor/gvisor.dev/gvisor/pkg/bits/uint64_arch_generic.go
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !amd64 && !arm64
// +build !amd64,!arm64

package bits

// TrailingZeros64 returns the number of bits before the least significant 1
// bit in x; in other words, it returns the index of the least significant 1
// bit in x. If x is 0, TrailingZeros64 returns 64.
func TrailingZeros64(x uint64) int {
	if x == 0 {
		return 64
	}
	i := 0
	for ; x&1 == 0; i++ {
		x >>= 1
	}
	return i
}

// MostSignificantOne64 returns the index of the most significant 1 bit in
// x. If x is 0, MostSignificantOne64 returns 64.
func MostSignificantOne64(x uint64) int {
	if x == 0 {
		return 64
	}
	i := 63
	for ; x&(1<<63) == 0; i-- {
		x <<= 1
	}
	return i
}

// ForEachSetBit64 calls f once for each set bit in x, with argument i equal to
// the set bit's index.
func ForEachSetBit64(x uint64, f func(i int)) {
	for i := 0; x != 0; i++ {
		if x&1 != 0 {
			f(i)
		}
		x >>= 1
	}
}
657
vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go
vendored
Normal file
@@ -0,0 +1,657 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package buffer provides the implementation of a non-contiguous buffer that
// is reference counted, pooled, and copy-on-write. It allows O(1) append and
// prepend operations.
package buffer

import (
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/tcpip/checksum"
)

// Buffer is a non-linear buffer.
//
// +stateify savable
type Buffer struct {
	data ViewList `state:".([]byte)"`
	size int64
}

func (b *Buffer) removeView(v *View) {
	b.data.Remove(v)
	v.Release()
}

// MakeWithData creates a new Buffer initialized with given data. This function
// should be used with caution to avoid unnecessary []byte allocations. When in
// doubt, use MakeWithView to maximize chunk reuse.
func MakeWithData(b []byte) Buffer {
	buf := Buffer{}
	if len(b) == 0 {
		return buf
	}
	v := NewViewWithData(b)
	buf.Append(v)
	return buf
}

// MakeWithView creates a new Buffer initialized with given view. This function
// takes ownership of v.
func MakeWithView(v *View) Buffer {
	if v == nil {
		return Buffer{}
	}
	b := Buffer{
		size: int64(v.Size()),
	}
	if b.size == 0 {
		v.Release()
		return b
	}
	b.data.PushBack(v)
	return b
}

// Release frees all resources held by b.
func (b *Buffer) Release() {
	for v := b.data.Front(); v != nil; v = b.data.Front() {
		b.removeView(v)
	}
	b.size = 0
}

// TrimFront removes the first count bytes from the buffer.
func (b *Buffer) TrimFront(count int64) {
	if count >= b.size {
		b.advanceRead(b.size)
	} else {
		b.advanceRead(count)
	}
}

// ReadAt implements io.ReaderAt.ReadAt.
func (b *Buffer) ReadAt(p []byte, offset int64) (int, error) {
	var (
		skipped int64
		done    int64
	)
	for v := b.data.Front(); v != nil && done < int64(len(p)); v = v.Next() {
		needToSkip := int(offset - skipped)
		if sz := v.Size(); sz <= needToSkip {
			skipped += int64(sz)
			continue
		}

		// Actually read data.
		n := copy(p[done:], v.AsSlice()[needToSkip:])
		skipped += int64(needToSkip)
		done += int64(n)
	}
	if int(done) < len(p) || offset+done == b.size {
		return int(done), io.EOF
	}
	return int(done), nil
}

// advanceRead advances the Buffer's read index.
//
// Precondition: there must be sufficient bytes in the buffer.
func (b *Buffer) advanceRead(count int64) {
	for v := b.data.Front(); v != nil && count > 0; {
		sz := int64(v.Size())
		if sz > count {
			// There is still data for reading.
			v.TrimFront(int(count))
			b.size -= count
			count = 0
			return
		}

		// Consume the whole view.
		oldView := v
		v = v.Next() // Iterate.
		b.removeView(oldView)

		// Update counts.
		count -= sz
		b.size -= sz
	}
	if count > 0 {
		panic(fmt.Sprintf("advanceRead still has %d bytes remaining", count))
	}
}
// Truncate truncates the Buffer to the given length.
//
// This will not grow the Buffer, only shrink it. If a length is passed that is
// greater than the current size of the Buffer, then nothing will happen.
//
// Precondition: length must be >= 0.
func (b *Buffer) Truncate(length int64) {
	if length < 0 {
		panic("negative length provided")
	}
	if length >= b.size {
		return // Nothing to do.
	}
	for v := b.data.Back(); v != nil && b.size > length; v = b.data.Back() {
		sz := int64(v.Size())
		if after := b.size - sz; after < length {
			// Truncate the buffer locally.
			left := (length - after)
			v.write = v.read + int(left)
			b.size = length
			break
		}

		// Drop the buffer completely; see above.
		b.removeView(v)
		b.size -= sz
	}
}

// GrowTo grows the given Buffer to the number of bytes, which will be appended.
// If zero is true, all these bytes will be zero. If zero is false, then this is
// the caller's responsibility.
//
// Precondition: length must be >= 0.
func (b *Buffer) GrowTo(length int64, zero bool) {
	if length < 0 {
		panic("negative length provided")
	}
	for b.size < length {
		v := b.data.Back()

		// Is there some space in the last buffer?
		if v.Full() {
			v = NewView(int(length - b.size))
			b.data.PushBack(v)
		}

		// Write up to length bytes.
		sz := v.AvailableSize()
		if int64(sz) > length-b.size {
			sz = int(length - b.size)
		}

		// Zero the written section.
		if zero {
			clear(v.chunk.data[v.write : v.write+sz])
		}

		// Advance the index.
		v.Grow(sz)
		b.size += int64(sz)
	}
}

// Prepend prepends the given data. Prepend takes ownership of src.
func (b *Buffer) Prepend(src *View) error {
	if src == nil {
		return nil
	}
	if src.Size() == 0 {
		src.Release()
		return nil
	}
	// If the first buffer does not have room, just prepend the view.
	v := b.data.Front()
	if v == nil || v.read == 0 {
		b.prependOwned(src)
		return nil
	}

	// If there's room at the front and we won't incur a copy by writing to this
	// view, fill in the extra room first.
	if !v.sharesChunk() {
		avail := v.read
		vStart := 0
		srcStart := src.Size() - avail
		if avail > src.Size() {
			vStart = avail - src.Size()
			srcStart = 0
		}
		// Save the write index and restore it after.
		old := v.write
		v.read = vStart
		n, err := v.WriteAt(src.AsSlice()[srcStart:], 0)
		if err != nil {
			return fmt.Errorf("could not write to view during prepend: %w", err)
		}
		b.size += int64(n)
		v.write = old
		src.write = srcStart

		// If there's no more to be written, then we're done.
		if src.Size() == 0 {
			src.Release()
			return nil
		}
	}

	// Otherwise, just prepend the view.
	b.prependOwned(src)
	return nil
}

// Append appends the given data. Append takes ownership of src.
func (b *Buffer) Append(src *View) error {
	if src == nil {
		return nil
	}
	if src.Size() == 0 {
		src.Release()
		return nil
	}
	// If the last buffer is full, just append the view.
	v := b.data.Back()
	if v.Full() {
		b.appendOwned(src)
		return nil
	}

	// If a write won't incur a copy, then fill the back of the existing last
	// chunk.
	if !v.sharesChunk() {
		writeSz := src.Size()
		if src.Size() > v.AvailableSize() {
			writeSz = v.AvailableSize()
		}
		done, err := v.Write(src.AsSlice()[:writeSz])
		if err != nil {
			return fmt.Errorf("could not write to view during append: %w", err)
		}
		src.TrimFront(done)
		b.size += int64(done)
		if src.Size() == 0 {
			src.Release()
			return nil
		}
	}

	// If there is still data left, just append the src.
	b.appendOwned(src)
	return nil
}

func (b *Buffer) appendOwned(v *View) {
	b.data.PushBack(v)
	b.size += int64(v.Size())
}

func (b *Buffer) prependOwned(v *View) {
	b.data.PushFront(v)
	b.size += int64(v.Size())
}

// PullUp makes the specified range contiguous and returns the backing memory.
func (b *Buffer) PullUp(offset, length int) (View, bool) {
	if length == 0 {
		return View{}, true
	}
	tgt := Range{begin: offset, end: offset + length}
	if tgt.Intersect(Range{end: int(b.size)}).Len() != length {
		return View{}, false
	}

	curr := Range{}
	v := b.data.Front()
	for ; v != nil; v = v.Next() {
		origLen := v.Size()
		curr.end = curr.begin + origLen

		if x := curr.Intersect(tgt); x.Len() == tgt.Len() {
			// buf covers the whole requested target range.
			sub := x.Offset(-curr.begin)
			// Don't increment the reference count of the underlying chunk. Views
			// returned by PullUp are explicitly unowned and read only.
			new := View{
				read:  v.read + sub.begin,
				write: v.read + sub.end,
				chunk: v.chunk,
			}
			return new, true
		} else if x.Len() > 0 {
			// buf is pointing at the starting buffer we want to merge.
			break
		}

		curr.begin += origLen
	}

	// Calculate the total merged length.
	totLen := 0
	for n := v; n != nil; n = n.Next() {
		totLen += n.Size()
		if curr.begin+totLen >= tgt.end {
			break
		}
	}

	// Merge the buffers.
	merged := NewViewSize(totLen)
	off := 0
	for n := v; n != nil && off < totLen; {
		merged.WriteAt(n.AsSlice(), off)
		off += n.Size()

		// Remove buffers except for the first one, which will be reused.
		if n == v {
			n = n.Next()
		} else {
			old := n
			n = n.Next()
			b.removeView(old)
		}
	}
	// Make data the first buffer.
	b.data.InsertBefore(v, merged)
	b.removeView(v)

	r := tgt.Offset(-curr.begin)
	pulled := View{
		read:  r.begin,
		write: r.end,
		chunk: merged.chunk,
	}
	return pulled, true
}
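PullUp is the escape hatch for parsing fixed-size structures that may straddle view boundaries: it merges only the views covering [offset, offset+length) and returns an unowned, read-only View. A hedged sketch of a typical caller:

package example

import "gvisor.dev/gvisor/pkg/buffer"

// parseHeader returns the first 8 bytes of b as a contiguous slice,
// merging views only if the header straddles a view boundary.
func parseHeader(b *buffer.Buffer) ([]byte, bool) {
	v, ok := b.PullUp(0, 8)
	if !ok {
		return nil, false // fewer than 8 bytes buffered
	}
	return v.AsSlice(), true
}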
// Flatten returns a flattened copy of this data.
//
// This method should not be used in any performance-sensitive paths. It may
// allocate a fresh byte slice sufficiently large to contain all the data in
// the buffer. This is principally for debugging.
//
// N.B. The data may still belong to this Buffer: if there is a single buffer
// present, it may be returned directly. This should be used for temporary use
// only, and a reference to the given slice should not be held.
func (b *Buffer) Flatten() []byte {
	if v := b.data.Front(); v == nil {
		return nil // No data at all.
	}
	data := make([]byte, 0, b.size) // Need to flatten.
	for v := b.data.Front(); v != nil; v = v.Next() {
		// Copy to the allocated slice.
		data = append(data, v.AsSlice()...)
	}
	return data
}

// Size indicates the total amount of data available in this Buffer.
func (b *Buffer) Size() int64 {
	return b.size
}

// AsViewList returns the ViewList backing b. Users may not save or modify the
// ViewList returned.
func (b *Buffer) AsViewList() ViewList {
	return b.data
}

// Clone creates a copy-on-write clone of b. The underlying chunks are shared
// until they are written to.
func (b *Buffer) Clone() Buffer {
	other := Buffer{
		size: b.size,
	}
	for v := b.data.Front(); v != nil; v = v.Next() {
		newView := v.Clone()
		other.data.PushBack(newView)
	}
	return other
}

// DeepClone creates a deep clone of b, copying data such that no bytes are
// shared with any other Buffers.
func (b *Buffer) DeepClone() Buffer {
	newBuf := Buffer{}
	buf := b.Clone()
	reader := buf.AsBufferReader()
	newBuf.WriteFromReader(&reader, b.size)
	return newBuf
}

// Apply applies the given function across all valid data.
func (b *Buffer) Apply(fn func(*View)) {
	for v := b.data.Front(); v != nil; v = v.Next() {
		d := v.Clone()
		fn(d)
		d.Release()
	}
}

// SubApply applies fn to a given range of data in b. Any part of the range
// outside of b is ignored.
func (b *Buffer) SubApply(offset, length int, fn func(*View)) {
	for v := b.data.Front(); length > 0 && v != nil; v = v.Next() {
		if offset >= v.Size() {
			offset -= v.Size()
			continue
		}
		d := v.Clone()
		if offset > 0 {
			d.TrimFront(offset)
			offset = 0
		}
		if length < d.Size() {
			d.write = d.read + length
		}
		fn(d)
		length -= d.Size()
		d.Release()
	}
}

// Checksum calculates a checksum over the buffer's payload starting at offset.
func (b *Buffer) Checksum(offset int) uint16 {
	if offset >= int(b.size) {
		return 0
	}
	var v *View
	for v = b.data.Front(); v != nil && offset >= v.Size(); v = v.Next() {
		offset -= v.Size()
	}

	var cs checksum.Checksumer
	cs.Add(v.AsSlice()[offset:])
	for v = v.Next(); v != nil; v = v.Next() {
		cs.Add(v.AsSlice())
	}
	return cs.Checksum()
}

// Merge merges the provided Buffer with this one.
//
// The other Buffer will be appended to b, and other will be empty after this
// operation completes.
func (b *Buffer) Merge(other *Buffer) {
	b.data.PushBackList(&other.data)
	other.data = ViewList{}

	// Adjust sizes.
	b.size += other.size
	other.size = 0
}

// WriteFromReader writes to the buffer from an io.Reader. A maximum read size
// of MaxChunkSize is enforced to prevent allocating views from the heap.
func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
	return b.WriteFromReaderAndLimitedReader(r, count, nil)
}

// WriteFromReaderAndLimitedReader is the same as WriteFromReader, but
// optimized to avoid allocations if a LimitedReader is passed in.
//
// This function clobbers the values of lr.
func (b *Buffer) WriteFromReaderAndLimitedReader(r io.Reader, count int64, lr *io.LimitedReader) (int64, error) {
	if lr == nil {
		lr = &io.LimitedReader{}
	}

	var done int64
	for done < count {
		vsize := count - done
		if vsize > MaxChunkSize {
			vsize = MaxChunkSize
		}
		v := NewView(int(vsize))
		lr.R = r
		lr.N = vsize
		n, err := io.Copy(v, lr)
		b.Append(v)
		done += n
		if err == io.EOF {
			break
		}
		if err != nil {
			return done, err
		}
	}
	return done, nil
}
// ReadToWriter reads from the buffer into an io.Writer.
//
// N.B. This does not consume the bytes read. TrimFront should
// be called appropriately after this call in order to do so.
func (b *Buffer) ReadToWriter(w io.Writer, count int64) (int64, error) {
	bytesLeft := int(count)
	for v := b.data.Front(); v != nil && bytesLeft > 0; v = v.Next() {
		view := v.Clone()
		if view.Size() > bytesLeft {
			view.CapLength(bytesLeft)
		}
		n, err := io.Copy(w, view)
		bytesLeft -= int(n)
		view.Release()
		if err != nil {
			return count - int64(bytesLeft), err
		}
	}
	return count - int64(bytesLeft), nil
}

// read implements the io.Reader interface. This method is used by BufferReader
// to consume its underlying buffer. To perform io operations on buffers
// directly, use ReadToWriter or WriteFromReader.
func (b *Buffer) read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if b.Size() == 0 {
		return 0, io.EOF
	}
	done := 0
	v := b.data.Front()
	for v != nil && done < len(p) {
		n, err := v.Read(p[done:])
		done += n
		next := v.Next()
		if v.Size() == 0 {
			b.removeView(v)
		}
		b.size -= int64(n)
		if err != nil && err != io.EOF {
			return done, err
		}
		v = next
	}
	return done, nil
}

// readByte implements the io.ByteReader interface. This method is used by
// BufferReader to consume its underlying buffer. To perform io operations on
// buffers directly, use ReadToWriter or WriteFromReader.
func (b *Buffer) readByte() (byte, error) {
	if b.Size() == 0 {
		return 0, io.EOF
	}
	v := b.data.Front()
	bt := v.AsSlice()[0]
	b.TrimFront(1)
	return bt, nil
}

// AsBufferReader returns the Buffer as a BufferReader capable of io methods.
// The new BufferReader takes ownership of b.
func (b *Buffer) AsBufferReader() BufferReader {
	return BufferReader{b}
}

// BufferReader implements io methods on Buffer. Users must call Close()
// when finished with the buffer to free the underlying memory.
type BufferReader struct {
	b *Buffer
}

// Read implements the io.Reader interface.
func (br *BufferReader) Read(p []byte) (int, error) {
	return br.b.read(p)
}

// ReadByte implements the io.ByteReader interface.
func (br *BufferReader) ReadByte() (byte, error) {
	return br.b.readByte()
}

// Close implements the io.Closer interface.
func (br *BufferReader) Close() {
	br.b.Release()
}

// Len returns the number of bytes in the unread portion of the buffer.
func (br *BufferReader) Len() int {
	return int(br.b.Size())
}

// Range specifies a range of buffer.
type Range struct {
	begin int
	end   int
}

// Intersect returns the intersection of x and y.
func (x Range) Intersect(y Range) Range {
	if x.begin < y.begin {
		x.begin = y.begin
	}
	if x.end > y.end {
		x.end = y.end
	}
	if x.begin >= x.end {
		return Range{}
	}
	return x
}

// Offset returns x offset by off.
func (x Range) Offset(off int) Range {
	x.begin += off
	x.end += off
	return x
}

// Len returns the length of x.
func (x Range) Len() int {
	l := x.end - x.begin
	if l < 0 {
		l = 0
	}
	return l
}
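Putting the pieces together: MakeWithData wraps bytes in a pooled view, Append takes ownership of views (filling spare chunk capacity when it can), and BufferReader adapts the result to io.Reader. A hedged end-to-end sketch:

package main

import (
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/buffer"
)

func main() {
	b := buffer.MakeWithData([]byte("hello, "))
	// Append takes ownership of the view; no copy if the last chunk has room.
	b.Append(buffer.NewViewWithData([]byte("world")))

	r := b.AsBufferReader()
	defer r.Close() // releases the underlying views

	out, _ := io.ReadAll(&r)
	fmt.Println(string(out)) // hello, world
}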
29
vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"context"
)

// saveData is invoked by stateify.
func (b *Buffer) saveData() []byte {
	return b.Flatten()
}

// loadData is invoked by stateify.
func (b *Buffer) loadData(_ context.Context, data []byte) {
	*b = MakeWithData(data)
}
187
vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go
vendored
Normal file
@@ -0,0 +1,187 @@
// automatically generated by stateify.

package buffer

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (b *Buffer) StateTypeName() string {
	return "pkg/buffer.Buffer"
}

func (b *Buffer) StateFields() []string {
	return []string{
		"data",
		"size",
	}
}

func (b *Buffer) beforeSave() {}

// +checklocksignore
func (b *Buffer) StateSave(stateSinkObject state.Sink) {
	b.beforeSave()
	var dataValue []byte
	dataValue = b.saveData()
	stateSinkObject.SaveValue(0, dataValue)
	stateSinkObject.Save(1, &b.size)
}

func (b *Buffer) afterLoad(context.Context) {}

// +checklocksignore
func (b *Buffer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(1, &b.size)
	stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(ctx, y.([]byte)) })
}

func (c *chunk) StateTypeName() string {
	return "pkg/buffer.chunk"
}

func (c *chunk) StateFields() []string {
	return []string{
		"chunkRefs",
		"data",
	}
}

func (c *chunk) beforeSave() {}

// +checklocksignore
func (c *chunk) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.chunkRefs)
	stateSinkObject.Save(1, &c.data)
}

func (c *chunk) afterLoad(context.Context) {}

// +checklocksignore
func (c *chunk) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.chunkRefs)
	stateSourceObject.Load(1, &c.data)
}

func (r *chunkRefs) StateTypeName() string {
	return "pkg/buffer.chunkRefs"
}

func (r *chunkRefs) StateFields() []string {
	return []string{
		"refCount",
	}
}

func (r *chunkRefs) beforeSave() {}

// +checklocksignore
func (r *chunkRefs) StateSave(stateSinkObject state.Sink) {
	r.beforeSave()
	stateSinkObject.Save(0, &r.refCount)
}

// +checklocksignore
func (r *chunkRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &r.refCount)
	stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
}

func (v *View) StateTypeName() string {
	return "pkg/buffer.View"
}

func (v *View) StateFields() []string {
	return []string{
		"read",
		"write",
		"chunk",
	}
}

func (v *View) beforeSave() {}

// +checklocksignore
func (v *View) StateSave(stateSinkObject state.Sink) {
	v.beforeSave()
	stateSinkObject.Save(0, &v.read)
	stateSinkObject.Save(1, &v.write)
	stateSinkObject.Save(2, &v.chunk)
}

func (v *View) afterLoad(context.Context) {}

// +checklocksignore
func (v *View) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &v.read)
	stateSourceObject.Load(1, &v.write)
	stateSourceObject.Load(2, &v.chunk)
}

func (l *ViewList) StateTypeName() string {
	return "pkg/buffer.ViewList"
}

func (l *ViewList) StateFields() []string {
	return []string{
		"head",
		"tail",
	}
}

func (l *ViewList) beforeSave() {}

// +checklocksignore
func (l *ViewList) StateSave(stateSinkObject state.Sink) {
	l.beforeSave()
	stateSinkObject.Save(0, &l.head)
	stateSinkObject.Save(1, &l.tail)
}

func (l *ViewList) afterLoad(context.Context) {}

// +checklocksignore
func (l *ViewList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &l.head)
	stateSourceObject.Load(1, &l.tail)
}

func (e *ViewEntry) StateTypeName() string {
	return "pkg/buffer.ViewEntry"
}

func (e *ViewEntry) StateFields() []string {
	return []string{
		"next",
		"prev",
	}
}

func (e *ViewEntry) beforeSave() {}

// +checklocksignore
func (e *ViewEntry) StateSave(stateSinkObject state.Sink) {
	e.beforeSave()
	stateSinkObject.Save(0, &e.next)
	stateSinkObject.Save(1, &e.prev)
}

func (e *ViewEntry) afterLoad(context.Context) {}

// +checklocksignore
func (e *ViewEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.next)
	stateSourceObject.Load(1, &e.prev)
}

func init() {
	state.Register((*Buffer)(nil))
	state.Register((*chunk)(nil))
	state.Register((*chunkRefs)(nil))
	state.Register((*View)(nil))
	state.Register((*ViewList)(nil))
	state.Register((*ViewEntry)(nil))
}
3
vendor/gvisor.dev/gvisor/pkg/buffer/buffer_unsafe_state_autogen.go
vendored
Normal file
@@ -0,0 +1,3 @@
// automatically generated by stateify.

package buffer
113
vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go
vendored
Normal file
@@ -0,0 +1,113 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/bits"
	"gvisor.dev/gvisor/pkg/sync"
)

const (
	// baseChunkSizeLog2 is log2(baseChunkSize). This number is used to
	// calculate which pool to use for a payload size by right shifting the
	// payload size by this number and passing the result to
	// MostSignificantOne64.
	baseChunkSizeLog2 = 6

	// baseChunkSize is the size of the buffers in the first pool. Each
	// subsequent pool creates payloads 2^(pool index) times larger than
	// the first pool's payloads.
	baseChunkSize = 1 << baseChunkSizeLog2 // 64

	// MaxChunkSize is the largest payload size that we pool. Payloads
	// larger than this will be allocated from the heap and garbage
	// collected as normal.
	MaxChunkSize = baseChunkSize << (numPools - 1) // 64k

	// numPools is the number of chunk pools we have for use.
	numPools = 11
)

// chunkPools is a collection of pools for payloads of different sizes. The
// size of the payloads doubles in each successive pool.
var chunkPools [numPools]sync.Pool

func init() {
	for i := 0; i < numPools; i++ {
		chunkSize := baseChunkSize * (1 << i)
		chunkPools[i].New = func() any {
			return &chunk{
				data: make([]byte, chunkSize),
			}
		}
	}
}

// Precondition: 0 <= size <= MaxChunkSize.
func getChunkPool(size int) *sync.Pool {
	idx := 0
	if size > baseChunkSize {
		idx = bits.MostSignificantOne64(uint64(size) >> baseChunkSizeLog2)
		if size > 1<<(idx+baseChunkSizeLog2) {
			idx++
		}
	}
	if idx >= numPools {
		panic(fmt.Sprintf("pool for chunk size %d does not exist", size))
	}
	return &chunkPools[idx]
}

// chunk represents a slice of pooled memory.
//
// +stateify savable
type chunk struct {
	chunkRefs
	data []byte
}

func newChunk(size int) *chunk {
	var c *chunk
	if size > MaxChunkSize {
		c = &chunk{
			data: make([]byte, size),
		}
	} else {
		pool := getChunkPool(size)
		c = pool.Get().(*chunk)
		clear(c.data)
	}
	c.InitRefs()
	return c
}

func (c *chunk) destroy() {
	if len(c.data) > MaxChunkSize {
		c.data = nil
		return
	}
	pool := getChunkPool(len(c.data))
	pool.Put(c)
}

func (c *chunk) DecRef() {
	c.chunkRefs.DecRef(c.destroy)
}

func (c *chunk) Clone() *chunk {
	cpy := newChunk(len(c.data))
	copy(cpy.data, c.data)
	return cpy
}
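getChunkPool rounds a payload size up to the smallest pool whose chunks can hold it. A rough standalone equivalent of that rounding, for illustration only (the loop below stands in for the MostSignificantOne64 arithmetic; it is not part of the vendored file):

// Illustrative sketch: how payload sizes map to pool indices under
// getChunkPool's logic (baseChunkSize = 64, numPools = 11, so pooled
// chunk sizes are 64, 128, ..., 65536).
package main

import "fmt"

func poolIndex(size int) int {
	idx := 0
	for 64<<idx < size { // smallest pool whose chunk fits size
		idx++
	}
	return idx
}

func main() {
	for _, size := range []int{1, 64, 65, 128, 4096, 65536} {
		fmt.Printf("size %5d -> pool %2d (chunk %d bytes)\n",
			size, poolIndex(size), 64<<poolIndex(size))
	}
}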
142
vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go
vendored
Normal file
@@ -0,0 +1,142 @@
package buffer

import (
	"context"
	"fmt"

	"gvisor.dev/gvisor/pkg/atomicbitops"
	"gvisor.dev/gvisor/pkg/refs"
)

// enableLogging indicates whether reference-related events should be logged (with
// stack traces). This is false by default and should only be set to true for
// debugging purposes, as it can generate an extremely large amount of output
// and drastically degrade performance.
const chunkenableLogging = false

// obj is used to customize logging. Note that we use a pointer to T so that
// we do not copy the entire object when passed as a format parameter.
var chunkobj *chunk

// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
// NOTE: Do not introduce additional fields to the Refs struct. It is used by
// many filesystem objects, and we want to keep it as small as possible (i.e.,
// the same size as using an int64 directly) to avoid taking up extra cache
// space. In general, this template should not be extended at the cost of
// performance. If it does not offer enough flexibility for a particular object
// (example: b/187877947), we should implement the RefCounter/CheckedObject
// interfaces manually.
//
// +stateify savable
type chunkRefs struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a
	// CompareAndSwap loop. See IncRef, DecRef and TryIncRef for details of
	// how these fields are used.
	refCount atomicbitops.Int64
}

// InitRefs initializes r with one reference and, if enabled, activates leak
// checking.
func (r *chunkRefs) InitRefs() {
	r.refCount.RacyStore(1)
	refs.Register(r)
}

// RefType implements refs.CheckedObject.RefType.
func (r *chunkRefs) RefType() string {
	return fmt.Sprintf("%T", chunkobj)[1:]
}

// LeakMessage implements refs.CheckedObject.LeakMessage.
func (r *chunkRefs) LeakMessage() string {
	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
}

// LogRefs implements refs.CheckedObject.LogRefs.
func (r *chunkRefs) LogRefs() bool {
	return chunkenableLogging
}

// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *chunkRefs) ReadRefs() int64 {
	return r.refCount.Load()
}

// IncRef implements refs.RefCounter.IncRef.
//
//go:nosplit
func (r *chunkRefs) IncRef() {
	v := r.refCount.Add(1)
	if chunkenableLogging {
		refs.LogIncRef(r, v)
	}
	if v <= 1 {
		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
	}
}

// TryIncRef implements refs.TryRefCounter.TryIncRef.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to
// distinguish other TryIncRef calls from genuine references held.
//
//go:nosplit
func (r *chunkRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := r.refCount.Add(speculativeRef); int32(v) == 0 {
		r.refCount.Add(-speculativeRef)
		return false
	}

	v := r.refCount.Add(-speculativeRef + 1)
	if chunkenableLogging {
		refs.LogTryIncRef(r, v)
	}
	return true
}

// DecRef implements refs.RefCounter.DecRef.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
//	A: TryIncRef [speculative increase => sees non-negative references]
//	B: DecRef [real decrease]
//	A: TryIncRef [transform speculative to real]
//
//go:nosplit
func (r *chunkRefs) DecRef(destroy func()) {
	v := r.refCount.Add(-1)
	if chunkenableLogging {
		refs.LogDecRef(r, v)
	}
	switch {
	case v < 0:
		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))

	case v == 0:
		refs.Unregister(r)

		if destroy != nil {
			destroy()
		}
	}
}

func (r *chunkRefs) afterLoad(context.Context) {
	if r.ReadRefs() > 0 {
		refs.Register(r)
	}
}
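The refCount field above packs two 32-bit counters into a single int64, which is why TryIncRef can add 1<<32 and then test only the low half. A standalone sketch of that encoding (illustrative only, not the vendored code):

// Standalone illustration of the packed counter used by chunkRefs:
// the high 32 bits hold speculative references, the low 32 real ones.
package main

import "fmt"

func main() {
	const speculativeRef = int64(1) << 32

	refCount := int64(1) // one real reference, as after InitRefs

	refCount += speculativeRef      // TryIncRef: speculative acquire
	alive := int32(refCount) != 0   // low 32 bits are the real count
	refCount += -speculativeRef + 1 // convert speculative -> real

	fmt.Println(alive, refCount) // true 2
}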
366
vendor/gvisor.dev/gvisor/pkg/buffer/view.go
vendored
Normal file
@@ -0,0 +1,366 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/sync"
)

// ReadSize is the default amount that a View's size is increased by when an
// io.Reader has more data than a View can hold during calls to ReadFrom.
const ReadSize = 512

var viewPool = sync.Pool{
	New: func() any {
		return &View{}
	},
}

// View is a window into a shared chunk. Views are held by Buffers in
// viewLists to represent contiguous memory.
//
// A View must be created with NewView, NewViewWithData, or Clone. Owners are
// responsible for maintaining ownership over their views. When Views need to
// be shared or copied, the owner should create a new View with Clone. Clone
// must only ever be called on an owned View, not a borrowed one.
//
// Users are responsible for calling Release when finished with their View so
// that its resources can be returned to the pool.
//
// Users must not write directly to slices returned by AsSlice. Instead, they
// must use Write/WriteAt/CopyIn to modify the underlying View. This preserves
// the safety guarantees of copy-on-write.
//
// +stateify savable
type View struct {
	ViewEntry `state:"nosave"`
	read      int
	write     int
	chunk     *chunk
}

// NewView creates a new view with capacity at least as big as cap. It is
// analogous to make([]byte, 0, cap).
func NewView(cap int) *View {
	c := newChunk(cap)
	v := viewPool.Get().(*View)
	*v = View{chunk: c}
	return v
}

// NewViewSize creates a new view with capacity at least as big as size and
// length that is exactly size. It is analogous to make([]byte, size).
func NewViewSize(size int) *View {
	v := NewView(size)
	v.Grow(size)
	return v
}

// NewViewWithData creates a new view and initializes it with data. This
// function should be used with caution to avoid unnecessary []byte
// allocations. When in doubt use NewWithView to maximize chunk reuse in
// production environments.
func NewViewWithData(data []byte) *View {
	c := newChunk(len(data))
	v := viewPool.Get().(*View)
	*v = View{chunk: c}
	v.Write(data)
	return v
}

// Clone creates a shallow clone of v where the underlying chunk is shared.
//
// The caller must own the View to call Clone. It is not safe to call Clone
// on a borrowed or shared View because it can race with other View methods.
func (v *View) Clone() *View {
	if v == nil {
		panic("cannot clone a nil view")
	}
	v.chunk.IncRef()
	newV := viewPool.Get().(*View)
	newV.chunk = v.chunk
	newV.read = v.read
	newV.write = v.write
	return newV
}

// Release releases the chunk held by v and returns v to the pool.
func (v *View) Release() {
	if v == nil {
		panic("cannot release a nil view")
	}
	v.chunk.DecRef()
	*v = View{}
	viewPool.Put(v)
}

// Reset sets the view's read and write indices back to zero.
func (v *View) Reset() {
	if v == nil {
		panic("cannot reset a nil view")
	}
	v.read = 0
	v.write = 0
}

func (v *View) sharesChunk() bool {
	return v.chunk.refCount.Load() > 1
}

// Full indicates the chunk is full.
//
// This indicates there is no capacity left to write.
func (v *View) Full() bool {
	return v == nil || v.write == len(v.chunk.data)
}

// Capacity returns the total size of this view's chunk.
func (v *View) Capacity() int {
	if v == nil {
		return 0
	}
	return len(v.chunk.data)
}

// Size returns the size of data written to the view.
func (v *View) Size() int {
	if v == nil {
		return 0
	}
	return v.write - v.read
}

// TrimFront advances the read index by the given amount.
func (v *View) TrimFront(n int) {
	if v.read+n > v.write {
		panic("cannot trim past the end of a view")
	}
	v.read += n
}

// AsSlice returns a slice of the data written to this view.
func (v *View) AsSlice() []byte {
	if v.Size() == 0 {
		return nil
	}
	return v.chunk.data[v.read:v.write]
}

// ToSlice returns an owned copy of the data in this view.
func (v *View) ToSlice() []byte {
	if v.Size() == 0 {
		return nil
	}
	s := make([]byte, v.Size())
	copy(s, v.AsSlice())
	return s
}

// AvailableSize returns the number of bytes available for writing.
func (v *View) AvailableSize() int {
	if v == nil {
		return 0
	}
	return len(v.chunk.data) - v.write
}

// Read reads v's data into p.
//
// Implements the io.Reader interface.
func (v *View) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if v.Size() == 0 {
		return 0, io.EOF
	}
	n := copy(p, v.AsSlice())
	v.TrimFront(n)
	return n, nil
}

// ReadByte implements the io.ByteReader interface.
func (v *View) ReadByte() (byte, error) {
	if v.Size() == 0 {
		return 0, io.EOF
	}
	b := v.AsSlice()[0]
	v.read++
	return b, nil
}

// WriteTo writes data to w until the view is empty or an error occurs. The
// return value n is the number of bytes written.
//
// WriteTo implements the io.WriterTo interface.
func (v *View) WriteTo(w io.Writer) (n int64, err error) {
	if v.Size() > 0 {
		sz := v.Size()
		m, e := w.Write(v.AsSlice())
		v.TrimFront(m)
		n = int64(m)
		if e != nil {
			return n, e
		}
		if m != sz {
			return n, io.ErrShortWrite
		}
	}
	return n, nil
}

// ReadAt reads data into p starting at offset off.
//
// Implements the io.ReaderAt interface.
func (v *View) ReadAt(p []byte, off int) (int, error) {
	if off < 0 || off > v.Size() {
		return 0, fmt.Errorf("ReadAt(): offset out of bounds: want 0 <= off <= %d, got off=%d", v.Size(), off)
	}
	n := copy(p, v.AsSlice()[off:])
	return n, nil
}

// Write writes data to the view's chunk starting at the v.write index. If the
// view's chunk has a reference count greater than 1, the chunk is copied first
// and then written to.
//
// Implements the io.Writer interface.
func (v *View) Write(p []byte) (int, error) {
	if v == nil {
		panic("cannot write to a nil view")
	}
	if v.AvailableSize() < len(p) {
		v.growCap(len(p) - v.AvailableSize())
	} else if v.sharesChunk() {
		defer v.chunk.DecRef()
		v.chunk = v.chunk.Clone()
	}
	n := copy(v.chunk.data[v.write:], p)
	v.write += n
	if n < len(p) {
		return n, io.ErrShortWrite
	}
	return n, nil
}

// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned.
//
// ReadFrom implements the io.ReaderFrom interface.
func (v *View) ReadFrom(r io.Reader) (n int64, err error) {
	if v == nil {
		panic("cannot write to a nil view")
	}
	if v.sharesChunk() {
		defer v.chunk.DecRef()
		v.chunk = v.chunk.Clone()
	}
	for {
		// Check for EOF to avoid an unnecessary allocation.
		if _, e := r.Read(nil); e == io.EOF {
			return n, nil
		}
		if v.AvailableSize() == 0 {
			v.growCap(ReadSize)
		}
		m, e := r.Read(v.availableSlice())
		v.write += m
		n += int64(m)

		if e == io.EOF {
			return n, nil
		}
		if e != nil {
			return n, e
		}
	}
}

// WriteAt writes data to the view's chunk starting at start. If the view's
// chunk has a reference count greater than 1, the chunk is copied first and
// then written to.
//
// Implements the io.WriterAt interface.
func (v *View) WriteAt(p []byte, off int) (int, error) {
	if v == nil {
		panic("cannot write to a nil view")
	}
	if off < 0 || off > v.Size() {
		return 0, fmt.Errorf("write offset out of bounds: want 0 <= off <= %d, got off=%d", v.Size(), off)
	}
	if v.sharesChunk() {
		defer v.chunk.DecRef()
		v.chunk = v.chunk.Clone()
	}
	n := copy(v.AsSlice()[off:], p)
	if n < len(p) {
		return n, io.ErrShortWrite
	}
	return n, nil
}

// Grow increases the size of the view. If the new size is greater than the
// view's current capacity, Grow will reallocate the view with an increased
// capacity.
func (v *View) Grow(n int) {
	if v == nil {
		panic("cannot grow a nil view")
	}
	if v.write+n > v.Capacity() {
		v.growCap(n)
	}
	v.write += n
}

// growCap increases the capacity of the view by at least n.
func (v *View) growCap(n int) {
	if v == nil {
		panic("cannot grow a nil view")
	}
	defer v.chunk.DecRef()
	old := v.AsSlice()
	v.chunk = newChunk(v.Capacity() + n)
	copy(v.chunk.data, old)
	v.read = 0
	v.write = len(old)
}

// CapLength caps the length of the view's read slice to n. If n > v.Size(),
// the function is a no-op.
func (v *View) CapLength(n int) {
	if v == nil {
		panic("cannot resize a nil view")
	}
	if n < 0 {
		panic("n must be >= 0")
	}
	if n > v.Size() {
		n = v.Size()
	}
	v.write = v.read + n
}

func (v *View) availableSlice() []byte {
	if v.sharesChunk() {
		defer v.chunk.DecRef()
		c := v.chunk.Clone()
		v.chunk = c
	}
	return v.chunk.data[v.write:]
}
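A hypothetical usage sketch of the copy-on-write behavior documented above (it assumes the vendored import path gvisor.dev/gvisor/pkg/buffer and is not part of the vendored file):

// Clone shares the underlying chunk; the next Write sees a refcount
// greater than 1 and copies the chunk first, leaving the original View
// untouched.
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/buffer"
)

func main() {
	v := buffer.NewViewWithData([]byte("hello"))
	defer v.Release()

	c := v.Clone() // shares v's chunk; the chunk refcount is now 2
	defer c.Release()

	c.Write([]byte(" world")) // copy-on-write: c gets its own chunk

	fmt.Println(string(v.AsSlice()), "/", string(c.AsSlice())) // hello / hello world
}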
239
vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go
vendored
Normal file
@@ -0,0 +1,239 @@
package buffer

// ViewElementMapper provides an identity mapping by default.
//
// This can be replaced to provide a struct that maps elements to linker
// objects, if they are not the same. An ElementMapper is not typically
// required if: Linker is left as is, Element is left as is, or Linker and
// Element are the same type.
type ViewElementMapper struct{}

// linkerFor maps an Element to a Linker.
//
// This default implementation should be inlined.
//
//go:nosplit
func (ViewElementMapper) linkerFor(elem *View) *View { return elem }

// ViewList is an intrusive list. Entries can be added to or removed from the
// list in O(1) time and with no additional memory allocations.
//
// The zero value for ViewList is an empty list ready to use.
//
// To iterate over a list (where l is a ViewList):
//
//	for e := l.Front(); e != nil; e = e.Next() {
//		// do something with e.
//	}
//
// +stateify savable
type ViewList struct {
	head *View
	tail *View
}

// Reset resets list l to the empty state.
func (l *ViewList) Reset() {
	l.head = nil
	l.tail = nil
}

// Empty returns true iff the list is empty.
//
//go:nosplit
func (l *ViewList) Empty() bool {
	return l.head == nil
}

// Front returns the first element of list l or nil.
//
//go:nosplit
func (l *ViewList) Front() *View {
	return l.head
}

// Back returns the last element of list l or nil.
//
//go:nosplit
func (l *ViewList) Back() *View {
	return l.tail
}

// Len returns the number of elements in the list.
//
// NOTE: This is an O(n) operation.
//
//go:nosplit
func (l *ViewList) Len() (count int) {
	for e := l.Front(); e != nil; e = (ViewElementMapper{}.linkerFor(e)).Next() {
		count++
	}
	return count
}

// PushFront inserts the element e at the front of list l.
//
//go:nosplit
func (l *ViewList) PushFront(e *View) {
	linker := ViewElementMapper{}.linkerFor(e)
	linker.SetNext(l.head)
	linker.SetPrev(nil)
	if l.head != nil {
		ViewElementMapper{}.linkerFor(l.head).SetPrev(e)
	} else {
		l.tail = e
	}

	l.head = e
}

// PushFrontList inserts list m at the start of list l, emptying m.
//
//go:nosplit
func (l *ViewList) PushFrontList(m *ViewList) {
	if l.head == nil {
		l.head = m.head
		l.tail = m.tail
	} else if m.head != nil {
		ViewElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
		ViewElementMapper{}.linkerFor(m.tail).SetNext(l.head)

		l.head = m.head
	}
	m.head = nil
	m.tail = nil
}

// PushBack inserts the element e at the back of list l.
//
//go:nosplit
func (l *ViewList) PushBack(e *View) {
	linker := ViewElementMapper{}.linkerFor(e)
	linker.SetNext(nil)
	linker.SetPrev(l.tail)
	if l.tail != nil {
		ViewElementMapper{}.linkerFor(l.tail).SetNext(e)
	} else {
		l.head = e
	}

	l.tail = e
}

// PushBackList inserts list m at the end of list l, emptying m.
//
//go:nosplit
func (l *ViewList) PushBackList(m *ViewList) {
	if l.head == nil {
		l.head = m.head
		l.tail = m.tail
	} else if m.head != nil {
		ViewElementMapper{}.linkerFor(l.tail).SetNext(m.head)
		ViewElementMapper{}.linkerFor(m.head).SetPrev(l.tail)

		l.tail = m.tail
	}
	m.head = nil
	m.tail = nil
}

// InsertAfter inserts e after b.
//
//go:nosplit
func (l *ViewList) InsertAfter(b, e *View) {
	bLinker := ViewElementMapper{}.linkerFor(b)
	eLinker := ViewElementMapper{}.linkerFor(e)

	a := bLinker.Next()

	eLinker.SetNext(a)
	eLinker.SetPrev(b)
	bLinker.SetNext(e)

	if a != nil {
		ViewElementMapper{}.linkerFor(a).SetPrev(e)
	} else {
		l.tail = e
	}
}

// InsertBefore inserts e before a.
//
//go:nosplit
func (l *ViewList) InsertBefore(a, e *View) {
	aLinker := ViewElementMapper{}.linkerFor(a)
	eLinker := ViewElementMapper{}.linkerFor(e)

	b := aLinker.Prev()
	eLinker.SetNext(a)
	eLinker.SetPrev(b)
	aLinker.SetPrev(e)

	if b != nil {
		ViewElementMapper{}.linkerFor(b).SetNext(e)
	} else {
		l.head = e
	}
}

// Remove removes e from l.
//
//go:nosplit
func (l *ViewList) Remove(e *View) {
	linker := ViewElementMapper{}.linkerFor(e)
	prev := linker.Prev()
	next := linker.Next()

	if prev != nil {
		ViewElementMapper{}.linkerFor(prev).SetNext(next)
	} else if l.head == e {
		l.head = next
	}

	if next != nil {
		ViewElementMapper{}.linkerFor(next).SetPrev(prev)
	} else if l.tail == e {
		l.tail = prev
	}

	linker.SetNext(nil)
	linker.SetPrev(nil)
}

// ViewEntry is a default implementation of Linker. Users can add anonymous
// fields of this type to their structs to make them automatically implement
// the methods needed by List.
//
// +stateify savable
type ViewEntry struct {
	next *View
	prev *View
}

// Next returns the entry that follows e in the list.
//
//go:nosplit
func (e *ViewEntry) Next() *View {
	return e.next
}

// Prev returns the entry that precedes e in the list.
//
//go:nosplit
func (e *ViewEntry) Prev() *View {
	return e.prev
}

// SetNext assigns 'elem' as the entry that follows e in the list.
//
//go:nosplit
func (e *ViewEntry) SetNext(elem *View) {
	e.next = elem
}

// SetPrev assigns 'elem' as the entry that precedes e in the list.
//
//go:nosplit
func (e *ViewEntry) SetPrev(elem *View) {
	e.prev = elem
}
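A hypothetical usage sketch of the intrusive list (again assuming the vendored import path; not part of the vendored file): each View carries its own links in the embedded ViewEntry, so pushing onto the list allocates nothing extra.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/buffer"
)

func main() {
	var l buffer.ViewList // the zero value is an empty, usable list

	l.PushBack(buffer.NewViewSize(1))
	l.PushBack(buffer.NewViewSize(2))

	total := 0
	for e := l.Front(); e != nil; e = e.Next() {
		total += e.Size() // a real caller would also Release each View
	}
	fmt.Println(l.Len(), total) // 2 3
}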
26
vendor/gvisor.dev/gvisor/pkg/buffer/view_unsafe.go
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"reflect"
	"unsafe"
)

// BasePtr returns a pointer to the view's chunk.
func (v *View) BasePtr() *byte {
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&v.chunk.data))
	return (*byte)(unsafe.Pointer(hdr.Data))
}
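BasePtr goes through reflect.SliceHeader, which newer Go releases deprecate. For comparison only, and assuming a Go 1.20+ toolchain, the same base pointer can be taken with unsafe.SliceData (this is not a change to the vendored code):

package main

import (
	"fmt"
	"unsafe"
)

// basePtr returns a pointer to the slice's backing array, equivalent to
// the SliceHeader trick above on Go 1.20+.
func basePtr(data []byte) *byte {
	return unsafe.SliceData(data)
}

func main() {
	b := []byte{1, 2, 3}
	fmt.Println(*basePtr(b)) // 1
}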
228
vendor/gvisor.dev/gvisor/pkg/context/context.go
vendored
Normal file
@@ -0,0 +1,228 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package context defines an internal context type.
//
// The given Context conforms to the standard Go context, but mandates
// additional methods that are specific to the kernel internals. Note however,
// that the Context described by this package carries additional constraints
// regarding concurrent access and retaining beyond the scope of a call.
//
// See the Context type for complete details.
package context

import (
	"context"
	"errors"
	"sync"
	"time"

	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/waiter"
)

// Blocker represents an object with control flow hooks.
//
// These may be used to perform blocking operations, sleep or otherwise
// wait, since there may be asynchronous events that require processing.
type Blocker interface {
	// Interrupt interrupts any Block operations.
	Interrupt()

	// Interrupted notes whether this context is Interrupted.
	Interrupted() bool

	// BlockOn blocks until one of the previously registered events occurs,
	// or some external interrupt (cancellation).
	//
	// The return value should indicate whether the wake-up occurred as a
	// result of the requested event (versus an external interrupt).
	BlockOn(waiter.Waitable, waiter.EventMask) bool

	// Block blocks until an event is received from C, or some external
	// interrupt. It returns nil if an event is received from C and an
	// error if it is interrupted.
	Block(C <-chan struct{}) error

	// BlockWithTimeoutOn blocks until either the conditions of Block are
	// satisfied, or the timeout is hit. Note that deadlines are not
	// supported since the notion of "with respect to what clock" is not
	// resolved.
	//
	// The return value is per BlockOn.
	BlockWithTimeoutOn(waiter.Waitable, waiter.EventMask, time.Duration) (time.Duration, bool)

	// UninterruptibleSleepStart indicates the beginning of an uninterruptible
	// sleep state (equivalent to Linux's TASK_UNINTERRUPTIBLE). If deactivate
	// is true and the Context represents a Task, the Task's AddressSpace is
	// deactivated.
	UninterruptibleSleepStart(deactivate bool)

	// UninterruptibleSleepFinish indicates the end of an uninterruptible
	// sleep state that was begun by a previous call to
	// UninterruptibleSleepStart. If activate is true and the Context
	// represents a Task, the Task's AddressSpace is activated. Normally
	// activate is the same value as the deactivate parameter passed to
	// UninterruptibleSleepStart.
	UninterruptibleSleepFinish(activate bool)
}

// NoTask is an implementation of Blocker that does not block.
type NoTask struct {
	cancel chan struct{}
}

// Interrupt implements Blocker.Interrupt.
func (nt *NoTask) Interrupt() {
	select {
	case nt.cancel <- struct{}{}:
	default:
	}
}

// Interrupted implements Blocker.Interrupted.
func (nt *NoTask) Interrupted() bool {
	return nt.cancel != nil && len(nt.cancel) > 0
}

// Block implements Blocker.Block.
func (nt *NoTask) Block(C <-chan struct{}) error {
	if nt.cancel == nil {
		nt.cancel = make(chan struct{}, 1)
	}
	select {
	case <-nt.cancel:
		return errors.New("interrupted system call") // Interrupted.
	case <-C:
		return nil
	}
}

// BlockOn implements Blocker.BlockOn.
func (nt *NoTask) BlockOn(w waiter.Waitable, mask waiter.EventMask) bool {
	if nt.cancel == nil {
		nt.cancel = make(chan struct{}, 1)
	}
	e, ch := waiter.NewChannelEntry(mask)
	w.EventRegister(&e)
	defer w.EventUnregister(&e)
	select {
	case <-nt.cancel:
		return false // Interrupted.
	case _, ok := <-ch:
		return ok
	}
}

// BlockWithTimeoutOn implements Blocker.BlockWithTimeoutOn.
func (nt *NoTask) BlockWithTimeoutOn(w waiter.Waitable, mask waiter.EventMask, duration time.Duration) (time.Duration, bool) {
	if nt.cancel == nil {
		nt.cancel = make(chan struct{}, 1)
	}
	e, ch := waiter.NewChannelEntry(mask)
	w.EventRegister(&e)
	defer w.EventUnregister(&e)
	start := time.Now() // In system time.
	t := time.AfterFunc(duration, func() { ch <- struct{}{} })
	select {
	case <-nt.cancel:
		return time.Since(start), false // Interrupted.
	case _, ok := <-ch:
		if ok && t.Stop() {
			// Timer never fired.
			return time.Since(start), ok
		}
		// Timer fired, remain is zero.
		return time.Duration(0), ok
	}
}

// UninterruptibleSleepStart implements Blocker.UninterruptibleSleepStart.
func (*NoTask) UninterruptibleSleepStart(bool) {}

// UninterruptibleSleepFinish implements Blocker.UninterruptibleSleepFinish.
func (*NoTask) UninterruptibleSleepFinish(bool) {}

// Context represents a thread of execution (hereafter "goroutine" to reflect
// Go idiosyncrasy). It carries state associated with the goroutine across API
// boundaries.
//
// While Context exists for essentially the same reasons as Go's standard
// context.Context, the standard type represents the state of an operation
// rather than that of a goroutine. This is a critical distinction:
//
//   - Unlike context.Context, which "may be passed to functions running in
//     different goroutines", it is *not safe* to use the same Context in
//     multiple concurrent goroutines.
//
//   - It is *not safe* to retain a Context passed to a function beyond the
//     scope of that function call.
//
// In both cases, values extracted from the Context should be used instead.
type Context interface {
	context.Context
	log.Logger
	Blocker
}

// logContext implements basic logging.
type logContext struct {
	NoTask
	log.Logger
	context.Context
}

// bgContext is the context returned by context.Background.
var bgContext Context
var bgOnce sync.Once

// Background returns an empty context using the default logger.
// Generally, one should use the Task as their context when available, or avoid
// having to use a context in places where a Task is unavailable.
//
// Using a Background context for tests is fine, as long as no values are
// needed from the context in the tested code paths.
//
// The global log.SetTarget() must be called before context.Background().
func Background() Context {
	bgOnce.Do(func() {
		bgContext = &logContext{
			Context: context.Background(),
			Logger:  log.Log(),
		}
	})
	return bgContext
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
func WithValue(parent Context, key, val any) Context {
	return &withValue{
		Context: parent,
		key:     key,
		val:     val,
	}
}

type withValue struct {
	Context
	key any
	val any
}

// Value implements Context.Value.
func (ctx *withValue) Value(key any) any {
	if key == ctx.key {
		return ctx.val
	}
	return ctx.Context.Value(key)
}
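A hypothetical usage sketch of WithValue (assuming the vendored import path; not part of the vendored file): one extra key resolves locally and every other lookup falls through to the parent.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
)

type exampleKey struct{} // hypothetical key type for illustration

func main() {
	// Per the doc comment above, log.SetTarget should be configured
	// before the first call to Background().
	ctx := context.WithValue(context.Background(), exampleKey{}, "demo")
	fmt.Println(ctx.Value(exampleKey{})) // demo
}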
3
vendor/gvisor.dev/gvisor/pkg/context/context_state_autogen.go
vendored
Normal file
@@ -0,0 +1,3 @@
// automatically generated by stateify.

package context
264
vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go
vendored
Normal file
@@ -0,0 +1,264 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cpuid provides basic functionality for creating and adjusting CPU
// feature sets.
//
// Each architecture should define its own FeatureSet type, that must be
// savable, along with an allFeatures map, appropriate arch hooks and a
// HostFeatureSet function. This file contains common functionality to all
// architectures, which is essentially string munging and some errors.
//
// Individual architectures may export methods on FeatureSet that are relevant,
// e.g. FeatureSet.Vendor(). Common to all architectures, FeatureSets include
// HasFeature, which provides a trivial mechanism to test for the presence of
// specific hardware features. The hardware features are also defined on a
// per-architecture basis.
package cpuid

import (
	"encoding/binary"
	"fmt"
	"os"
	"runtime"
	"strings"

	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/sync"
)

// contextID is this package's type for anyContext.Value keys.
type contextID int

const (
	// CtxFeatureSet is the FeatureSet for the context.
	CtxFeatureSet contextID = iota

	// _AT_HWCAP is the hardware capability bit vector.
	_AT_HWCAP = 16
	// _AT_HWCAP2 is the hardware capability bit vector 2.
	_AT_HWCAP2 = 26
)

// anyContext represents context.Context.
type anyContext interface {
	Value(key any) any
}

// FromContext returns the FeatureSet from the context, if available.
func FromContext(ctx anyContext) FeatureSet {
	v := ctx.Value(CtxFeatureSet)
	if v == nil {
		return FeatureSet{} // Panics if used.
	}
	return v.(FeatureSet)
}

// Feature is a unique identifier for a particular cpu feature. We just use an
// int as a feature number on x86 and arm64.
//
// On x86, features are numbered according to "blocks". Each block is 32 bits,
// and feature bits from the same source (cpuid leaf/level) are in the same
// block.
//
// On arm64, features are numbered according to the ELF HWCAP definition, from
// arch/arm64/include/uapi/asm/hwcap.h.
type Feature int

// allFeatureInfo is the value for allFeatures.
type allFeatureInfo struct {
	// displayName is the short display name for the feature.
	displayName string

	// shouldAppear indicates whether the feature normally appears in
	// cpuinfo. This affects FlagString only.
	shouldAppear bool
}

// String implements fmt.Stringer.String.
func (f Feature) String() string {
	info, ok := allFeatures[f]
	if ok {
		return info.displayName
	}
	return fmt.Sprintf("[0x%x?]", int(f)) // No given name.
}

// reverseMap is a map from displayName to Feature.
var reverseMap = func() map[string]Feature {
	m := make(map[string]Feature)
	for feature, info := range allFeatures {
		if info.displayName != "" {
			// Sanity check that the name is unique.
			if old, ok := m[info.displayName]; ok {
				panic(fmt.Sprintf("feature %v has conflicting values (0x%x vs 0x%x)", info.displayName, old, feature))
			}
			m[info.displayName] = feature
		}
	}
	return m
}()

// FeatureFromString returns the Feature associated with the given feature
// string plus a bool to indicate if it could find the feature.
func FeatureFromString(s string) (Feature, bool) {
	feature, ok := reverseMap[s]
	return feature, ok
}

// AllFeatures returns the full set of all possible features.
func AllFeatures() (features []Feature) {
	archFlagOrder(func(f Feature) {
		features = append(features, f)
	})
	return
}

// Subtract returns the features present in fs that are not present in other.
// If all features in fs are present in other, Subtract returns nil.
//
// This does not check for any kinds of incompatibility.
func (fs FeatureSet) Subtract(other FeatureSet) (left map[Feature]struct{}) {
	for feature := range allFeatures {
		thisHas := fs.HasFeature(feature)
		otherHas := other.HasFeature(feature)
		if thisHas && !otherHas {
			if left == nil {
				left = make(map[Feature]struct{})
			}
			left[feature] = struct{}{}
		}
	}
	return
}

// FlagString prints out supported CPU flags.
func (fs FeatureSet) FlagString() string {
	var s []string
	archFlagOrder(func(feature Feature) {
		if !fs.HasFeature(feature) {
			return
		}
		info := allFeatures[feature]
		if !info.shouldAppear {
			return
		}
		s = append(s, info.displayName)
	})
	return strings.Join(s, " ")
}

// ErrIncompatible is returned for incompatible feature sets.
type ErrIncompatible struct {
	reason string
}

// Error implements error.Error.
func (e *ErrIncompatible) Error() string {
	return fmt.Sprintf("incompatible FeatureSet: %v", e.reason)
}

// CheckHostCompatible returns nil if fs is a subset of the host feature set.
func (fs FeatureSet) CheckHostCompatible() error {
	hfs := HostFeatureSet()

	// Check that hfs is a superset of fs.
	if diff := fs.Subtract(hfs); len(diff) > 0 {
		return &ErrIncompatible{
			reason: fmt.Sprintf("missing features: %v", diff),
		}
	}

	// Make arch-specific checks.
	return fs.archCheckHostCompatible(hfs)
}

// +stateify savable
type hwCap struct {
	// hwCap1 stores HWCAP bits exposed through the elf auxiliary vector.
	hwCap1 uint64
	// hwCap2 stores HWCAP2 bits exposed through the elf auxiliary vector.
	hwCap2 uint64
}

// The auxiliary vector of a process on the Linux system can be read
// from /proc/self/auxv, and tags and values are stored as pairs of
// 8-byte values on the 64-bit system.
//
// $ od -t d8 /proc/self/auxv
//
//	0000000    33    140734615224320
//	0000020    16         3219913727
//	0000040     6               4096
//	0000060    17                100
//	0000100     3     94665627353152
//	0000120     4                 56
//	0000140     5                  9
//	0000160     7    140425502162944
//	0000200     8                  0
//	0000220     9     94665627365760
//	0000240    11               1000
//	0000260    12               1000
//	0000300    13               1000
//	0000320    14               1000
//	0000340    23                  0
//	0000360    25    140734614619513
//	0000400    26                  0
//	0000420    31    140734614626284
//	0000440    15    140734614619529
//	0000460     0                  0
func readHWCap(auxvFilepath string) (hwCap, error) {
	c := hwCap{}
	if runtime.GOOS != "linux" {
		// Don't try to read Linux-specific /proc files.
		return c, fmt.Errorf("readHWCap only supported on linux, not %s", runtime.GOOS)
	}

	auxv, err := os.ReadFile(auxvFilepath)
	if err != nil {
		return c, fmt.Errorf("failed to read file %s: %w", auxvFilepath, err)
	}

	l := len(auxv) / 16
	for i := 0; i < l; i++ {
		tag := binary.LittleEndian.Uint64(auxv[i*16:])
		val := binary.LittleEndian.Uint64(auxv[i*16+8:])
		if tag == _AT_HWCAP {
			c.hwCap1 = val
		} else if tag == _AT_HWCAP2 {
			c.hwCap2 = val
		}

		if (c.hwCap1 != 0) && (c.hwCap2 != 0) {
			break
		}
	}
	return c, nil
}

func initHWCap() {
	c, err := readHWCap("/proc/self/auxv")
	if err != nil {
		log.Warningf("cpuid HWCap not initialized: %v", err)
	} else {
		hostFeatureSet.hwCap = c
	}
}

var initOnce sync.Once

// Initialize initializes the global data structures used by this package.
// Must be called prior to using anything else in this package.
func Initialize() {
	initOnce.Do(archInitialize)
}
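readHWCap walks the auxiliary vector as 16-byte records: an 8-byte little-endian tag followed by an 8-byte value. A standalone sketch decoding one synthetic record the same way (illustrative only; the tag and value are made up):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	rec := make([]byte, 16)
	binary.LittleEndian.PutUint64(rec[0:], 16)         // tag: AT_HWCAP
	binary.LittleEndian.PutUint64(rec[8:], 0xbfebfbff) // value: capability bits

	tag := binary.LittleEndian.Uint64(rec[0:])
	val := binary.LittleEndian.Uint64(rec[8:])
	fmt.Printf("tag=%d hwcap=%#x\n", tag, val) // tag=16 hwcap=0xbfebfbff
}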
475
vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go
vendored
Normal file
@@ -0,0 +1,475 @@
|
||||
// Copyright 2019 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build amd64
|
||||
// +build amd64
|
||||
|
||||
package cpuid
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// FeatureSet defines features in terms of CPUID leaves and bits.
|
||||
// The kernel also exposes the presence of features to userspace through
|
||||
// a set of flags(HWCAP/HWCAP2) bits, exposed in the auxiliary vector, which
|
||||
// are necessary to read for some features (e.g. FSGSBASE).
|
||||
//
|
||||
// Common references:
|
||||
//
|
||||
// Intel:
|
||||
// - Intel SDM Volume 2, Chapter 3.2 "CPUID" (more up-to-date)
|
||||
// - Intel Application Note 485 (more detailed)
|
||||
//
|
||||
// AMD:
|
||||
// - AMD64 APM Volume 3, Appendix 3 "Obtaining Processor Information ..."
|
||||
//
|
||||
// +stateify savable
|
||||
type FeatureSet struct {
|
||||
// Function is the underlying CPUID Function.
|
||||
//
|
||||
// This is exported to allow direct calls of the underlying CPUID
|
||||
// function, where required.
|
||||
Function `state:".(Static)"`
|
||||
// hwCap stores HWCAP1/2 exposed from the elf auxiliary vector.
|
||||
hwCap hwCap
|
||||
}
|
||||
|
||||
// saveFunction saves the function as a static query.
|
||||
func (fs *FeatureSet) saveFunction() Static {
|
||||
if s, ok := fs.Function.(Static); ok {
|
||||
return s
|
||||
}
|
||||
return fs.ToStatic()
|
||||
}
|
||||
|
||||
// loadFunction saves the function as a static query.
|
||||
func (fs *FeatureSet) loadFunction(_ context.Context, s Static) {
|
||||
fs.Function = s
|
||||
}
|
||||
|
||||
// Helper to convert 3 regs into 12-byte vendor ID.
|
||||
//
|
||||
//go:nosplit
|
||||
func vendorIDFromRegs(bx, cx, dx uint32) (r [12]byte) {
|
||||
for i := uint(0); i < 4; i++ {
|
||||
b := byte(bx >> (i * 8))
|
||||
r[i] = b
|
||||
}
|
||||
|
||||
for i := uint(0); i < 4; i++ {
|
||||
b := byte(dx >> (i * 8))
|
||||
r[4+i] = b
|
||||
}
|
||||
|
||||
for i := uint(0); i < 4; i++ {
|
||||
b := byte(cx >> (i * 8))
|
||||
r[8+i] = b
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Helper to merge a 12-byte vendor ID back to registers.
|
||||
//
|
||||
// Used by static_amd64.go.
|
||||
func regsFromVendorID(r [12]byte) (bx, cx, dx uint32) {
|
||||
bx |= uint32(r[0])
|
||||
bx |= uint32(r[1]) << 8
|
||||
bx |= uint32(r[2]) << 16
|
||||
bx |= uint32(r[3]) << 24
|
||||
cx |= uint32(r[4])
|
||||
cx |= uint32(r[5]) << 8
|
||||
cx |= uint32(r[6]) << 16
|
||||
cx |= uint32(r[7]) << 24
|
||||
dx |= uint32(r[8])
|
||||
dx |= uint32(r[9]) << 8
|
||||
dx |= uint32(r[10]) << 16
|
||||
dx |= uint32(r[10]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// VendorID is the 12-char string returned in ebx:edx:ecx for eax=0.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) VendorID() [12]byte {
|
||||
_, bx, cx, dx := fs.query(vendorID)
|
||||
return vendorIDFromRegs(bx, cx, dx)
|
||||
}
|
||||
|
||||
// Helper to deconstruct signature dword.
|
||||
//
|
||||
//go:nosplit
|
||||
func signatureSplit(v uint32) (ef, em, pt, f, m, sid uint8) {
|
||||
sid = uint8(v & 0xf)
|
||||
m = uint8(v>>4) & 0xf
|
||||
f = uint8(v>>8) & 0xf
|
||||
pt = uint8(v>>12) & 0x3
|
||||
em = uint8(v>>16) & 0xf
|
||||
ef = uint8(v >> 20)
|
||||
return
|
||||
}
|
||||
|
||||
// ExtendedFamily is part of the processor signature.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) ExtendedFamily() uint8 {
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
ef, _, _, _, _, _ := signatureSplit(ax)
|
||||
return ef
|
||||
}
|
||||
|
||||
// ExtendedModel is part of the processor signature.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) ExtendedModel() uint8 {
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
_, em, _, _, _, _ := signatureSplit(ax)
|
||||
return em
|
||||
}
|
||||
|
||||
// ProcessorType is part of the processor signature.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) ProcessorType() uint8 {
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
_, _, pt, _, _, _ := signatureSplit(ax)
|
||||
return pt
|
||||
}
|
||||
|
||||
// Family is part of the processor signature.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) Family() uint8 {
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
_, _, _, f, _, _ := signatureSplit(ax)
|
||||
return f
|
||||
}
|
||||
|
||||
// Model is part of the processor signature.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) Model() uint8 {
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
_, _, _, _, m, _ := signatureSplit(ax)
|
||||
return m
|
||||
}
|
||||
|
||||
// SteppingID is part of the processor signature.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) SteppingID() uint8 {
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
_, _, _, _, _, sid := signatureSplit(ax)
|
||||
return sid
|
||||
}
|
||||
|
||||
// VirtualAddressBits returns the number of bits available for virtual
|
||||
// addresses.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) VirtualAddressBits() uint32 {
|
||||
ax, _, _, _ := fs.query(addressSizes)
|
||||
return (ax >> 8) & 0xff
|
||||
}
|
||||
|
||||
// PhysicalAddressBits returns the number of bits available for physical
|
||||
// addresses.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) PhysicalAddressBits() uint32 {
|
||||
ax, _, _, _ := fs.query(addressSizes)
|
||||
return ax & 0xff
|
||||
}
|
||||
|
||||
// CacheType describes the type of a cache, as returned in eax[4:0] for eax=4.
|
||||
type CacheType uint8
|
||||
|
||||
const (
|
||||
// cacheNull indicates that there are no more entries.
|
||||
cacheNull CacheType = iota
|
||||
|
||||
// CacheData is a data cache.
|
||||
CacheData
|
||||
|
||||
// CacheInstruction is an instruction cache.
|
||||
CacheInstruction
|
||||
|
||||
// CacheUnified is a unified instruction and data cache.
|
||||
CacheUnified
|
||||
)
|
||||
|
||||
// Cache describes the parameters of a single cache on the system.
|
||||
//
|
||||
// This is returned by the Caches method on FeatureSet.
|
||||
type Cache struct {
|
||||
// Level is the hierarchical level of this cache (L1, L2, etc).
|
||||
Level uint32
|
||||
|
||||
// Type is the type of cache.
|
||||
Type CacheType
|
||||
|
||||
// FullyAssociative indicates that entries may be placed in any block.
|
||||
FullyAssociative bool
|
||||
|
||||
// Partitions is the number of physical partitions in the cache.
|
||||
Partitions uint32
|
||||
|
||||
// Ways is the number of ways of associativity in the cache.
|
||||
Ways uint32
|
||||
|
||||
// Sets is the number of sets in the cache.
|
||||
Sets uint32
|
||||
|
||||
// InvalidateHierarchical indicates that WBINVD/INVD from threads
|
||||
// sharing this cache acts upon lower level caches for threads sharing
|
||||
// this cache.
|
||||
InvalidateHierarchical bool
|
||||
|
||||
// Inclusive indicates that this cache is inclusive of lower cache
|
||||
// levels.
|
||||
Inclusive bool
|
||||
|
||||
// DirectMapped indicates that this cache is directly mapped from
|
||||
// address, rather than using a hash function.
|
||||
DirectMapped bool
|
||||
}
|
||||
|
||||
// Caches describes the caches on the CPU.
|
||||
//
|
||||
// Only supported on Intel; requires allocation.
|
||||
func (fs FeatureSet) Caches() (caches []Cache) {
|
||||
if !fs.Intel() {
|
||||
return
|
||||
}
|
||||
// Check against the cache line, which should be consistent.
|
||||
cacheLine := fs.CacheLine()
|
||||
for i := uint32(0); ; i++ {
|
||||
out := fs.Query(In{
|
||||
Eax: uint32(intelDeterministicCacheParams),
|
||||
Ecx: i,
|
||||
})
|
||||
t := CacheType(out.Eax & 0xf)
|
||||
if t == cacheNull {
|
||||
break
|
||||
}
|
||||
lineSize := (out.Ebx & 0xfff) + 1
|
||||
if lineSize != cacheLine {
|
||||
panic(fmt.Sprintf("Mismatched cache line size: %d vs %d", lineSize, cacheLine))
|
||||
}
|
||||
caches = append(caches, Cache{
|
||||
Type: t,
|
||||
Level: (out.Eax >> 5) & 0x7,
|
||||
FullyAssociative: ((out.Eax >> 9) & 1) == 1,
|
||||
Partitions: ((out.Ebx >> 12) & 0x3ff) + 1,
|
||||
Ways: ((out.Ebx >> 22) & 0x3ff) + 1,
|
||||
Sets: out.Ecx + 1,
|
||||
InvalidateHierarchical: (out.Edx & 1) == 0,
|
||||
Inclusive: ((out.Edx >> 1) & 1) == 1,
|
||||
DirectMapped: ((out.Edx >> 2) & 1) == 0,
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CacheLine is the size of a cache line in bytes.
|
||||
//
|
||||
// All caches use the same line size. This is not enforced in the CPUID
|
||||
// encoding, but is true on all known x86 processors.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) CacheLine() uint32 {
|
||||
_, bx, _, _ := fs.query(featureInfo)
|
||||
return 8 * (bx >> 8) & 0xff
|
||||
}
|
||||
|
||||
// HasFeature tests whether or not a feature is in the given feature set.
|
||||
//
|
||||
// This function is safe to call from a nosplit context, as long as the
|
||||
// FeatureSet does not have any masked features.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) HasFeature(feature Feature) bool {
|
||||
return feature.check(fs)
|
||||
}
|
||||
|
||||
// WriteCPUInfoTo is to generate a section of one cpu in /proc/cpuinfo. This is
|
||||
// a minimal /proc/cpuinfo, it is missing some fields like "microcode" that are
|
||||
// not always printed in Linux. The bogomips field is simply made up.
|
||||
func (fs FeatureSet) WriteCPUInfoTo(cpu, numCPU uint, w io.Writer) {
|
||||
// Avoid many redundant calls here, since this can occasionally appear
|
||||
// in the hot path. Read all basic information up front, see above.
|
||||
ax, _, _, _ := fs.query(featureInfo)
|
||||
ef, em, _, f, m, _ := signatureSplit(ax)
|
||||
vendor := fs.VendorID()
|
||||
fmt.Fprintf(w, "processor\t: %d\n", cpu)
|
||||
fmt.Fprintf(w, "vendor_id\t: %s\n", string(vendor[:]))
|
||||
fmt.Fprintf(w, "cpu family\t: %d\n", ((ef<<4)&0xff)|f)
|
||||
fmt.Fprintf(w, "model\t\t: %d\n", ((em<<4)&0xff)|m)
|
||||
fmt.Fprintf(w, "model name\t: %s\n", "unknown") // Unknown for now.
|
||||
fmt.Fprintf(w, "stepping\t: %s\n", "unknown") // Unknown for now.
|
||||
fmt.Fprintf(w, "cpu MHz\t\t: %.3f\n", cpuFreqMHz)
|
||||
fmt.Fprintf(w, "physical id\t: 0\n") // Pretend all CPUs are in the same socket.
|
||||
fmt.Fprintf(w, "siblings\t: %d\n", numCPU)
|
||||
fmt.Fprintf(w, "core id\t\t: %d\n", cpu)
|
||||
fmt.Fprintf(w, "cpu cores\t: %d\n", numCPU) // Pretend each CPU is a distinct core (rather than a hyperthread).
|
||||
fmt.Fprintf(w, "apicid\t\t: %d\n", cpu)
|
||||
fmt.Fprintf(w, "initial apicid\t: %d\n", cpu)
|
||||
fmt.Fprintf(w, "fpu\t\t: yes\n")
|
||||
fmt.Fprintf(w, "fpu_exception\t: yes\n")
|
||||
fmt.Fprintf(w, "cpuid level\t: %d\n", uint32(xSaveInfo)) // Same as ax in vendorID.
|
||||
fmt.Fprintf(w, "wp\t\t: yes\n")
|
||||
fmt.Fprintf(w, "flags\t\t: %s\n", fs.FlagString())
|
||||
fmt.Fprintf(w, "bogomips\t: %.02f\n", cpuFreqMHz) // It's bogus anyway.
|
||||
fmt.Fprintf(w, "clflush size\t: %d\n", fs.CacheLine())
|
||||
fmt.Fprintf(w, "cache_alignment\t: %d\n", fs.CacheLine())
|
||||
fmt.Fprintf(w, "address sizes\t: %d bits physical, %d bits virtual\n", 46, 48)
|
||||
fmt.Fprintf(w, "power management:\n") // This is always here, but can be blank.
|
||||
fmt.Fprintf(w, "\n") // The /proc/cpuinfo file ends with an extra newline.
|
||||
}
|
||||
|
||||
var (
|
||||
authenticAMD = [12]byte{'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D'}
|
||||
genuineIntel = [12]byte{'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l'}
|
||||
)
|
||||
|
||||
// AMD returns true if fs describes an AMD CPU.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) AMD() bool {
|
||||
return fs.VendorID() == authenticAMD
|
||||
}
|
||||
|
||||
// Intel returns true if fs describes an Intel CPU.
|
||||
//
|
||||
//go:nosplit
|
||||
func (fs FeatureSet) Intel() bool {
|
||||
return fs.VendorID() == genuineIntel
|
||||
}
|
||||

// Leaf 0 of the xSaveInfo function returns the size for currently
// enabled xsave features in ebx, the maximum size if all valid
// features are saved with xsave in ecx, and valid XCR0 bits in
// edx:eax.
//
// If xSaveInfo isn't supported, cpuid will not fault but will
// return bogus values.
var (
	xsaveSize       = native(In{Eax: uint32(xSaveInfo)}).Ebx
	maxXsaveSize    = native(In{Eax: uint32(xSaveInfo)}).Ecx
	amxTileCfgSize  = native(In{Eax: uint32(xSaveInfo), Ecx: 17}).Eax
	amxTileDataSize = native(In{Eax: uint32(xSaveInfo), Ecx: 18}).Eax
)

const (
	// XCR0AMXMask are the bits that enable xsave to operate on AMX TILECFG
	// and TILEDATA.
	//
	// Note: TILECFG and TILEDATA are always either both enabled or both
	// disabled.
	//
	// See Intel® 64 and IA-32 Architectures Software Developer’s Manual Vol.1
	// section 13.3 for details.
	XCR0AMXMask = uint64((1 << 17) | (1 << 18))
)

// ExtendedStateSize returns the number of bytes needed to save the "extended
// state" for the enabled features and the boundary it must be aligned to.
// Extended state includes floating point registers, and other CPU state that's
// not associated with the normal task context.
//
// Note: the return value matches the size of signal FP state frames.
// Look at check_xstate_in_sigframe() in the kernel sources for more details.
//
//go:nosplit
func (fs FeatureSet) ExtendedStateSize() (size, align uint) {
	if fs.UseXsave() {
		return uint(xsaveSize), 64
	}

	// If we don't support xsave, we fall back to fxsave, which requires
	// 512 bytes aligned to 16 bytes.
	return 512, 16
}

// AMXExtendedStateSize returns the number of bytes within the "extended state"
// area that is used for AMX.
func (fs FeatureSet) AMXExtendedStateSize() uint {
	if fs.UseXsave() {
		xcr0 := xgetbv(0)
		if (xcr0 & XCR0AMXMask) != 0 {
			return uint(amxTileCfgSize + amxTileDataSize)
		}
	}
	return 0
}

// ValidXCR0Mask returns the valid bits in control register XCR0.
//
// The AMX bits are always excluded, because AMX is not supported.
// TODO(gvisor.dev/issues/9896): Implement AMX support.
//
//go:nosplit
func (fs FeatureSet) ValidXCR0Mask() uint64 {
	if !fs.HasFeature(X86FeatureXSAVE) {
		return 0
	}
	ax, _, _, dx := fs.query(xSaveInfo)
	return (uint64(dx)<<32 | uint64(ax)) &^ XCR0AMXMask
}
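
// Illustrative sketch, not part of the vendored file: individual state
// components in the returned mask can be tested with the XSAVEFeature*
// constants from features_amd64.go:
//
//	mask := fs.ValidXCR0Mask()
//	x87OK := mask&XSAVEFeatureX87 != 0 // bit 0, always set when XSAVE is valid.
//	avxOK := mask&XSAVEFeatureAVX != 0 // bit 2, ymm upper halves.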

// UseXsave returns true if 'fs' supports the "xsave" instruction and the OS
// has enabled it, i.e. xsave is the FP state saving instruction of choice.
//
//go:nosplit
func (fs FeatureSet) UseXsave() bool {
	return fs.HasFeature(X86FeatureXSAVE) && fs.HasFeature(X86FeatureOSXSAVE)
}

// UseXsaveopt returns true if 'fs' supports the "xsaveopt" instruction.
//
//go:nosplit
func (fs FeatureSet) UseXsaveopt() bool {
	return fs.UseXsave() && fs.HasFeature(X86FeatureXSAVEOPT)
}

// UseXsavec returns true if 'fs' supports the "xsavec" instruction.
//
//go:nosplit
func (fs FeatureSet) UseXsavec() bool {
	return fs.UseXsaveopt() && fs.HasFeature(X86FeatureXSAVEC)
}

// UseFSGSBASE returns true if 'fs' supports the (RD|WR)(FS|GS)BASE instructions.
func (fs FeatureSet) UseFSGSBASE() bool {
	HWCAP2_FSGSBASE := uint64(1) << 1
	return fs.HasFeature(X86FeatureFSGSBase) && ((fs.hwCap.hwCap2 & HWCAP2_FSGSBASE) != 0)
}

// archCheckHostCompatible checks for compatibility with the host.
func (fs FeatureSet) archCheckHostCompatible(hfs FeatureSet) error {
	// The size of a cache line must match, as it is critical to correctly
	// utilizing CLFLUSH. Other cache properties are allowed to change, as
	// they are not important to correctness.
	fsCache := fs.CacheLine()
	hostCache := hfs.CacheLine()
	if fsCache != hostCache {
		return &ErrIncompatible{
			reason: fmt.Sprintf("CPU cache line size %d incompatible with host cache line size %d", fsCache, hostCache),
		}
	}

	return nil
}
110
vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go
vendored
Normal file
@@ -0,0 +1,110 @@
// automatically generated by stateify.

//go:build amd64 && amd64 && amd64 && amd64
// +build amd64,amd64,amd64,amd64

package cpuid

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (fs *FeatureSet) StateTypeName() string {
	return "pkg/cpuid.FeatureSet"
}

func (fs *FeatureSet) StateFields() []string {
	return []string{
		"Function",
		"hwCap",
	}
}

func (fs *FeatureSet) beforeSave() {}

// +checklocksignore
func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
	fs.beforeSave()
	var FunctionValue Static
	FunctionValue = fs.saveFunction()
	stateSinkObject.SaveValue(0, FunctionValue)
	stateSinkObject.Save(1, &fs.hwCap)
}

func (fs *FeatureSet) afterLoad(context.Context) {}

// +checklocksignore
func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(1, &fs.hwCap)
	stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(ctx, y.(Static)) })
}

func (i *In) StateTypeName() string {
	return "pkg/cpuid.In"
}

func (i *In) StateFields() []string {
	return []string{
		"Eax",
		"Ecx",
	}
}

func (i *In) beforeSave() {}

// +checklocksignore
func (i *In) StateSave(stateSinkObject state.Sink) {
	i.beforeSave()
	stateSinkObject.Save(0, &i.Eax)
	stateSinkObject.Save(1, &i.Ecx)
}

func (i *In) afterLoad(context.Context) {}

// +checklocksignore
func (i *In) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &i.Eax)
	stateSourceObject.Load(1, &i.Ecx)
}

func (o *Out) StateTypeName() string {
	return "pkg/cpuid.Out"
}

func (o *Out) StateFields() []string {
	return []string{
		"Eax",
		"Ebx",
		"Ecx",
		"Edx",
	}
}

func (o *Out) beforeSave() {}

// +checklocksignore
func (o *Out) StateSave(stateSinkObject state.Sink) {
	o.beforeSave()
	stateSinkObject.Save(0, &o.Eax)
	stateSinkObject.Save(1, &o.Ebx)
	stateSinkObject.Save(2, &o.Ecx)
	stateSinkObject.Save(3, &o.Edx)
}

func (o *Out) afterLoad(context.Context) {}

// +checklocksignore
func (o *Out) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &o.Eax)
	stateSourceObject.Load(1, &o.Ebx)
	stateSourceObject.Load(2, &o.Ecx)
	stateSourceObject.Load(3, &o.Edx)
}

func init() {
	state.Register((*FeatureSet)(nil))
	state.Register((*In)(nil))
	state.Register((*Out)(nil))
}
110
vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go
vendored
Normal file
@@ -0,0 +1,110 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm64
// +build arm64

package cpuid

import (
	"fmt"
	"io"
)

// FeatureSet for ARM64 is defined as a static set of bits.
//
// ARM64 doesn't have a CPUID equivalent, which means it has no architected
// discovery mechanism for hardware features available to userspace code at
// EL0. The kernel exposes the presence of these features to userspace through
// a set of flag (HWCAP/HWCAP2) bits, exposed in the auxiliary vector. See
// Documentation/arm64/elf_hwcaps.rst for more info.
//
// Currently, only the HWCAP bits are supported.
//
// +stateify savable
type FeatureSet struct {
	hwCap      hwCap
	cpuFreqMHz float64
	cpuImplHex uint64
	cpuArchDec uint64
	cpuVarHex  uint64
	cpuPartHex uint64
	cpuRevDec  uint64
}

// CPUImplementer is part of the processor signature.
func (fs FeatureSet) CPUImplementer() uint8 {
	return uint8(fs.cpuImplHex)
}

// CPUArchitecture is part of the processor signature.
func (fs FeatureSet) CPUArchitecture() uint8 {
	return uint8(fs.cpuArchDec)
}

// CPUVariant is part of the processor signature.
func (fs FeatureSet) CPUVariant() uint8 {
	return uint8(fs.cpuVarHex)
}

// CPUPartnum is part of the processor signature.
func (fs FeatureSet) CPUPartnum() uint16 {
	return uint16(fs.cpuPartHex)
}

// CPURevision is part of the processor signature.
func (fs FeatureSet) CPURevision() uint8 {
	return uint8(fs.cpuRevDec)
}

// ExtendedStateSize returns the number of bytes needed to save the "extended
// state" for this processor and the boundary it must be aligned to. Extended
// state includes floating point (NEON) registers, and other CPU state that's
// not associated with the normal task context.
func (fs FeatureSet) ExtendedStateSize() (size, align uint) {
	// ARMv8 provides 32 128-bit NEON registers.
	//
	// Ref arch/arm64/include/uapi/asm/ptrace.h
	// struct user_fpsimd_state {
	//	__uint128_t vregs[32];
	//	__u32 fpsr;
	//	__u32 fpcr;
	//	__u32 __reserved[2];
	// };
	return 528, 16
}
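
// Illustrative check, not part of the vendored file: the 528 bytes above
// follow directly from the struct layout quoted in the comment:
//
//	const fpsimdStateSize = 32*16 + 4 + 4 + 2*4 // vregs + fpsr + fpcr + __reserved = 528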

// HasFeature checks for the presence of a feature.
func (fs FeatureSet) HasFeature(feature Feature) bool {
	return fs.hwCap.hwCap1&(1<<feature) != 0
}

// WriteCPUInfoTo generates a section for one CPU in /proc/cpuinfo. This is
// a minimal /proc/cpuinfo, and the bogomips field is simply made up.
func (fs FeatureSet) WriteCPUInfoTo(cpu, numCPU uint, w io.Writer) {
	fmt.Fprintf(w, "processor\t: %d\n", cpu)
	fmt.Fprintf(w, "BogoMIPS\t: %.02f\n", fs.cpuFreqMHz) // It's bogus anyway.
	fmt.Fprintf(w, "Features\t\t: %s\n", fs.FlagString())
	fmt.Fprintf(w, "CPU implementer\t: 0x%x\n", fs.cpuImplHex)
	fmt.Fprintf(w, "CPU architecture\t: %d\n", fs.cpuArchDec)
	fmt.Fprintf(w, "CPU variant\t: 0x%x\n", fs.cpuVarHex)
	fmt.Fprintf(w, "CPU part\t: 0x%x\n", fs.cpuPartHex)
	fmt.Fprintf(w, "CPU revision\t: %d\n", fs.cpuRevDec)
	fmt.Fprintf(w, "\n") // The /proc/cpuinfo file ends with an extra newline.
}

// archCheckHostCompatible is a no-op on arm64.
func (FeatureSet) archCheckHostCompatible(FeatureSet) error {
	return nil
}
59
vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go
vendored
Normal file
@@ -0,0 +1,59 @@
// automatically generated by stateify.

//go:build arm64 && arm64 && arm64
// +build arm64,arm64,arm64

package cpuid

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (fs *FeatureSet) StateTypeName() string {
	return "pkg/cpuid.FeatureSet"
}

func (fs *FeatureSet) StateFields() []string {
	return []string{
		"hwCap",
		"cpuFreqMHz",
		"cpuImplHex",
		"cpuArchDec",
		"cpuVarHex",
		"cpuPartHex",
		"cpuRevDec",
	}
}

func (fs *FeatureSet) beforeSave() {}

// +checklocksignore
func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
	fs.beforeSave()
	stateSinkObject.Save(0, &fs.hwCap)
	stateSinkObject.Save(1, &fs.cpuFreqMHz)
	stateSinkObject.Save(2, &fs.cpuImplHex)
	stateSinkObject.Save(3, &fs.cpuArchDec)
	stateSinkObject.Save(4, &fs.cpuVarHex)
	stateSinkObject.Save(5, &fs.cpuPartHex)
	stateSinkObject.Save(6, &fs.cpuRevDec)
}

func (fs *FeatureSet) afterLoad(context.Context) {}

// +checklocksignore
func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &fs.hwCap)
	stateSourceObject.Load(1, &fs.cpuFreqMHz)
	stateSourceObject.Load(2, &fs.cpuImplHex)
	stateSourceObject.Load(3, &fs.cpuArchDec)
	stateSourceObject.Load(4, &fs.cpuVarHex)
	stateSourceObject.Load(5, &fs.cpuPartHex)
	stateSourceObject.Load(6, &fs.cpuRevDec)
}

func init() {
	state.Register((*FeatureSet)(nil))
}
41
vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go
vendored
Normal file
@@ -0,0 +1,41 @@
// automatically generated by stateify.

package cpuid

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (h *hwCap) StateTypeName() string {
	return "pkg/cpuid.hwCap"
}

func (h *hwCap) StateFields() []string {
	return []string{
		"hwCap1",
		"hwCap2",
	}
}

func (h *hwCap) beforeSave() {}

// +checklocksignore
func (h *hwCap) StateSave(stateSinkObject state.Sink) {
	h.beforeSave()
	stateSinkObject.Save(0, &h.hwCap1)
	stateSinkObject.Save(1, &h.hwCap2)
}

func (h *hwCap) afterLoad(context.Context) {}

// +checklocksignore
func (h *hwCap) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &h.hwCap1)
	stateSourceObject.Load(1, &h.hwCap2)
}

func init() {
	state.Register((*hwCap)(nil))
}
664
vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go
vendored
Normal file
@@ -0,0 +1,664 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

package cpuid

// block is a collection of 32 Feature bits.
type block int

// blockSize is the number of bits in a single block.
const blockSize = 32

// featureID returns the feature identified by the given block and bit.
//
// Feature bits are numbered according to "blocks". Each block is 32 bits, and
// feature bits from the same source (cpuid leaf/level) are in the same block.
func featureID(b block, bit int) Feature {
	return Feature(blockSize*int(b) + bit)
}

// block returns the block associated with the feature.
func (f Feature) block() block {
	return block(f / blockSize)
}

// bit returns the bit associated with the feature.
func (f Feature) bit() uint32 {
	return uint32(1 << (f % blockSize))
}
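
// Illustrative check, not part of the vendored file: featureID and the
// block/bit accessors round-trip, which is what lets the check and set
// helpers below address a feature by (leaf, register, bit):
//
//	f := featureID(2, 5)        // block 2, bit 5 = Feature(69).
//	_ = f.block() == 2          // true.
//	_ = f.bit() == uint32(1)<<5 // true.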

// ChangeableSet is a feature set that allows changes.
type ChangeableSet interface {
	Query(in In) Out
	Set(in In, out Out)
}

// Set sets the given feature.
func (f Feature) Set(s ChangeableSet) {
	f.set(s, true)
}

// Unset unsets the given feature.
func (f Feature) Unset(s ChangeableSet) {
	f.set(s, false)
}

// set sets the given feature.
func (f Feature) set(s ChangeableSet, on bool) {
	switch f.block() {
	case 0:
		out := s.Query(In{Eax: uint32(featureInfo)})
		if on {
			out.Ecx |= f.bit()
		} else {
			out.Ecx &^= f.bit()
		}
		s.Set(In{Eax: uint32(featureInfo)}, out)
	case 1:
		out := s.Query(In{Eax: uint32(featureInfo)})
		if on {
			out.Edx |= f.bit()
		} else {
			out.Edx &^= f.bit()
		}
		s.Set(In{Eax: uint32(featureInfo)}, out)
	case 2:
		out := s.Query(In{Eax: uint32(extendedFeatureInfo)})
		if on {
			out.Ebx |= f.bit()
		} else {
			out.Ebx &^= f.bit()
		}
		s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)
	case 3:
		out := s.Query(In{Eax: uint32(extendedFeatureInfo)})
		if on {
			out.Ecx |= f.bit()
		} else {
			out.Ecx &^= f.bit()
		}
		s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)
	case 4:
		// Need to turn on the bit in block 0.
		out := s.Query(In{Eax: uint32(featureInfo)})
		out.Ecx |= (1 << 26)
		s.Set(In{Eax: uint32(featureInfo)}, out)

		out = s.Query(In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()})
		if on {
			out.Eax |= f.bit()
		} else {
			out.Eax &^= f.bit()
		}
		s.Set(In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()}, out)
	case 5, 6:
		// Need to enable extended features.
		out := s.Query(In{Eax: uint32(extendedFunctionInfo)})
		if out.Eax < uint32(extendedFeatures) {
			out.Eax = uint32(extendedFeatures)
		}
		s.Set(In{Eax: uint32(extendedFunctionInfo)}, out)
		out = s.Query(In{Eax: uint32(extendedFeatures)})
		if f.block() == 5 {
			if on {
				out.Ecx |= f.bit()
			} else {
				out.Ecx &^= f.bit()
			}
		} else {
			if on {
				out.Edx |= f.bit()
			} else {
				out.Edx &^= f.bit()
			}
		}
		s.Set(In{Eax: uint32(extendedFeatures)}, out)
	case 7:
		out := s.Query(In{Eax: uint32(extendedFeatureInfo)})
		if on {
			out.Edx |= f.bit()
		} else {
			out.Edx &^= f.bit()
		}
		s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)
	}
}

// check checks for the given feature.
//
//go:nosplit
func (f Feature) check(fs FeatureSet) bool {
	switch f.block() {
	case 0:
		_, _, cx, _ := fs.query(featureInfo)
		return (cx & f.bit()) != 0
	case 1:
		_, _, _, dx := fs.query(featureInfo)
		return (dx & f.bit()) != 0
	case 2:
		_, bx, _, _ := fs.query(extendedFeatureInfo)
		return (bx & f.bit()) != 0
	case 3:
		_, _, cx, _ := fs.query(extendedFeatureInfo)
		return (cx & f.bit()) != 0
	case 4:
		// Need to check the appropriate bit in block 0.
		_, _, cx, _ := fs.query(featureInfo)
		if (cx & (1 << 26)) == 0 {
			return false
		}
		ax, _, _, _ := fs.query(xSaveInfoSub)
		return (ax & f.bit()) != 0
	case 5, 6:
		// eax=0x80000000 gets supported extended levels. We use this
		// to determine if there are any non-zero block 5 or block 6
		// bits to find.
		ax, _, _, _ := fs.query(extendedFunctionInfo)
		if ax >= uint32(extendedFeatures) {
			_, _, cx, dx := fs.query(extendedFeatures)
			if f.block() == 5 {
				return (cx & f.bit()) != 0
			}
			// Ignore features duplicated from block 1 on AMD.
			// These bits are reserved on Intel.
			return ((dx &^ block6DuplicateMask) & f.bit()) != 0
		}
		return false
	case 7:
		_, _, _, dx := fs.query(extendedFeatureInfo)
		return (dx & f.bit()) != 0
	default:
		return false
	}
}
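
// Illustrative sketch, not part of the vendored file: Static (defined in
// static_amd64.go) satisfies ChangeableSet, so a feature can be masked out
// of a snapshot and the change observed through the derived FeatureSet:
//
//	s := HostFeatureSet().ToStatic()
//	X86FeatureAVX.Unset(s)
//	fs := s.ToFeatureSet()
//	_ = fs.HasFeature(X86FeatureAVX) // Now false, regardless of the host.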

// Block 0 constants are all of the "basic" feature bits returned by a cpuid in
// ecx with eax=1.
const (
	X86FeatureSSE3 Feature = iota
	X86FeaturePCLMULDQ
	X86FeatureDTES64
	X86FeatureMONITOR
	X86FeatureDSCPL
	X86FeatureVMX
	X86FeatureSMX
	X86FeatureEST
	X86FeatureTM2
	X86FeatureSSSE3 // Not a typo, "supplemental" SSE3.
	X86FeatureCNXTID
	X86FeatureSDBG
	X86FeatureFMA
	X86FeatureCX16
	X86FeatureXTPR
	X86FeaturePDCM
	_ // ecx bit 16 is reserved.
	X86FeaturePCID
	X86FeatureDCA
	X86FeatureSSE4_1
	X86FeatureSSE4_2
	X86FeatureX2APIC
	X86FeatureMOVBE
	X86FeaturePOPCNT
	X86FeatureTSCD
	X86FeatureAES
	X86FeatureXSAVE
	X86FeatureOSXSAVE
	X86FeatureAVX
	X86FeatureF16C
	X86FeatureRDRAND
	X86FeatureHypervisor
)

// Block 1 constants are all of the "basic" feature bits returned by a cpuid in
// edx with eax=1.
const (
	X86FeatureFPU Feature = 32 + iota
	X86FeatureVME
	X86FeatureDE
	X86FeaturePSE
	X86FeatureTSC
	X86FeatureMSR
	X86FeaturePAE
	X86FeatureMCE
	X86FeatureCX8
	X86FeatureAPIC
	_ // edx bit 10 is reserved.
	X86FeatureSEP
	X86FeatureMTRR
	X86FeaturePGE
	X86FeatureMCA
	X86FeatureCMOV
	X86FeaturePAT
	X86FeaturePSE36
	X86FeaturePSN
	X86FeatureCLFSH
	_ // edx bit 20 is reserved.
	X86FeatureDS
	X86FeatureACPI
	X86FeatureMMX
	X86FeatureFXSR
	X86FeatureSSE
	X86FeatureSSE2
	X86FeatureSS
	X86FeatureHTT
	X86FeatureTM
	X86FeatureIA64
	X86FeaturePBE
)

// Block 2 bits are the "structured extended" features returned in ebx for
// eax=7, ecx=0.
const (
	X86FeatureFSGSBase Feature = 2*32 + iota
	X86FeatureTSC_ADJUST
	_ // ebx bit 2 is reserved.
	X86FeatureBMI1
	X86FeatureHLE
	X86FeatureAVX2
	X86FeatureFDP_EXCPTN_ONLY
	X86FeatureSMEP
	X86FeatureBMI2
	X86FeatureERMS
	X86FeatureINVPCID
	X86FeatureRTM
	X86FeatureCQM
	X86FeatureFPCSDS
	X86FeatureMPX
	X86FeatureRDT
	X86FeatureAVX512F
	X86FeatureAVX512DQ
	X86FeatureRDSEED
	X86FeatureADX
	X86FeatureSMAP
	X86FeatureAVX512IFMA
	X86FeaturePCOMMIT
	X86FeatureCLFLUSHOPT
	X86FeatureCLWB
	X86FeatureIPT // Intel processor trace.
	X86FeatureAVX512PF
	X86FeatureAVX512ER
	X86FeatureAVX512CD
	X86FeatureSHA
	X86FeatureAVX512BW
	X86FeatureAVX512VL
)

// Block 3 bits are the "extended" features returned in ecx for eax=7, ecx=0.
const (
	X86FeaturePREFETCHWT1 Feature = 3*32 + iota
	X86FeatureAVX512VBMI
	X86FeatureUMIP
	X86FeaturePKU
	X86FeatureOSPKE
	X86FeatureWAITPKG
	X86FeatureAVX512_VBMI2
	X86FeatureCET_SS
	X86FeatureGFNI
	X86FeatureVAES
	X86FeatureVPCLMULQDQ
	X86FeatureAVX512_VNNI
	X86FeatureAVX512_BITALG
	X86FeatureTME
	X86FeatureAVX512_VPOPCNTDQ
	_ // ecx bit 15 is reserved.
	X86FeatureLA57
	// ecx bits 17-21 are reserved.
	_
	_
	_
	_
	_
	X86FeatureRDPID
	// ecx bits 23-24 are reserved.
	_
	_
	X86FeatureCLDEMOTE
	_ // ecx bit 26 is reserved.
	X86FeatureMOVDIRI
	X86FeatureMOVDIR64B
)

// Block 4 constants are for xsave capabilities in CPUID.(EAX=0DH,ECX=01H):EAX.
// The CPUID leaf is available only if 'X86FeatureXSAVE' is present.
const (
	X86FeatureXSAVEOPT Feature = 4*32 + iota
	X86FeatureXSAVEC
	X86FeatureXGETBV1
	X86FeatureXSAVES
	// EAX[31:4] are reserved.
)

// Block 5 constants are the extended feature bits in
// CPUID.(EAX=0x80000001):ECX.
const (
	X86FeatureLAHF64 Feature = 5*32 + iota
	X86FeatureCMP_LEGACY
	X86FeatureSVM
	X86FeatureEXTAPIC
	X86FeatureCR8_LEGACY
	X86FeatureLZCNT
	X86FeatureSSE4A
	X86FeatureMISALIGNSSE
	X86FeaturePREFETCHW
	X86FeatureOSVW
	X86FeatureIBS
	X86FeatureXOP
	X86FeatureSKINIT
	X86FeatureWDT
	_ // ecx bit 14 is reserved.
	X86FeatureLWP
	X86FeatureFMA4
	X86FeatureTCE
	_ // ecx bit 18 is reserved.
	_ // ecx bit 19 is reserved.
	_ // ecx bit 20 is reserved.
	X86FeatureTBM
	X86FeatureTOPOLOGY
	X86FeaturePERFCTR_CORE
	X86FeaturePERFCTR_NB
	_ // ecx bit 25 is reserved.
	X86FeatureBPEXT
	X86FeaturePERFCTR_TSC
	X86FeaturePERFCTR_LLC
	X86FeatureMWAITX
	X86FeatureADMSKEXTN
	_ // ecx bit 31 is reserved.
)

// Block 6 constants are the extended feature bits in
// CPUID.(EAX=0x80000001):EDX.
//
// These are sparse, and so the bit positions are assigned manually.
const (
	// On AMD, EDX[24:23] | EDX[17:12] | EDX[9:0] are duplicate features
	// also defined in block 1 (in identical bit positions). Those features
	// are not listed here.
	block6DuplicateMask = 0x183f3ff

	X86FeatureSYSCALL  Feature = 6*32 + 11
	X86FeatureNX       Feature = 6*32 + 20
	X86FeatureMMXEXT   Feature = 6*32 + 22
	X86FeatureFXSR_OPT Feature = 6*32 + 25
	X86FeatureGBPAGES  Feature = 6*32 + 26
	X86FeatureRDTSCP   Feature = 6*32 + 27
	X86FeatureLM       Feature = 6*32 + 29
	X86Feature3DNOWEXT Feature = 6*32 + 30
	X86Feature3DNOW    Feature = 6*32 + 31
)
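
// Illustrative check, not part of the vendored file: block6DuplicateMask is
// exactly the three duplicated ranges named in the comment above:
//
//	const dup = 0x3<<23 | 0x3f<<12 | 0x3ff // EDX[24:23] | EDX[17:12] | EDX[9:0]
//	// dup == 0x183f3ff == block6DuplicateMask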

// Block 7 constants are the extended features bits in
// CPUID.(EAX=07H,ECX=0):EDX.
const (
	_ Feature = 7*32 + iota // edx bit 0 is reserved.
	_                       // edx bit 1 is reserved.
	X86FeatureAVX512_4VNNIW
	X86FeatureAVX512_4FMAPS
	X86FeatureFSRM
	_ // edx bit 5 is not used in Linux.
	_ // edx bit 6 is reserved.
	_ // edx bit 7 is reserved.
	X86FeatureAVX512_VP2INTERSECT
	X86FeatureSRBDS_CTRL
	X86FeatureMD_CLEAR
	X86FeatureRTM_ALWAYS_ABORT
	_ // edx bit 12 is reserved.
	X86FeatureTSX_FORCE_ABORT
	X86FeatureSERIALIZE
	X86FeatureHYBRID_CPU
	X86FeatureTSXLDTRK
	_ // edx bit 17 is reserved.
	X86FeaturePCONFIG
	X86FeatureARCH_LBR
	X86FeatureIBT
	_ // edx bit 21 is reserved.
	X86FeatureAMX_BF16
	X86FeatureAVX512_FP16
	X86FeatureAMX_TILE
	X86FeatureAMX_INT8
	X86FeatureSPEC_CTRL
	X86FeatureINTEL_STIBP
	X86FeatureFLUSH_L1D
	X86FeatureARCH_CAPABILITIES
	X86FeatureCORE_CAPABILITIES
	X86FeatureSPEC_CTRL_SSBD
)

// These are the extended floating point state features. They are used to
// enumerate floating point features in XCR0, XSTATE_BV, etc.
const (
	XSAVEFeatureX87         = 1 << 0
	XSAVEFeatureSSE         = 1 << 1
	XSAVEFeatureAVX         = 1 << 2
	XSAVEFeatureBNDREGS     = 1 << 3
	XSAVEFeatureBNDCSR      = 1 << 4
	XSAVEFeatureAVX512op    = 1 << 5
	XSAVEFeatureAVX512zmm0  = 1 << 6
	XSAVEFeatureAVX512zmm16 = 1 << 7
	XSAVEFeaturePKRU        = 1 << 9
)

// allFeatures is the set of all known features.
//
// These match names used in arch/x86/kernel/cpu/capflags.c.
var allFeatures = map[Feature]allFeatureInfo{
	// Block 0.
	X86FeatureSSE3:       {"pni", true},
	X86FeaturePCLMULDQ:   {"pclmulqdq", true},
	X86FeatureDTES64:     {"dtes64", true},
	X86FeatureMONITOR:    {"monitor", true},
	X86FeatureDSCPL:      {"ds_cpl", true},
	X86FeatureVMX:        {"vmx", true},
	X86FeatureSMX:        {"smx", true},
	X86FeatureEST:        {"est", true},
	X86FeatureTM2:        {"tm2", true},
	X86FeatureSSSE3:      {"ssse3", true},
	X86FeatureCNXTID:     {"cid", true},
	X86FeatureSDBG:       {"sdbg", true},
	X86FeatureFMA:        {"fma", true},
	X86FeatureCX16:       {"cx16", true},
	X86FeatureXTPR:       {"xtpr", true},
	X86FeaturePDCM:       {"pdcm", true},
	X86FeaturePCID:       {"pcid", true},
	X86FeatureDCA:        {"dca", true},
	X86FeatureSSE4_1:     {"sse4_1", true},
	X86FeatureSSE4_2:     {"sse4_2", true},
	X86FeatureX2APIC:     {"x2apic", true},
	X86FeatureMOVBE:      {"movbe", true},
	X86FeaturePOPCNT:     {"popcnt", true},
	X86FeatureTSCD:       {"tsc_deadline_timer", true},
	X86FeatureAES:        {"aes", true},
	X86FeatureXSAVE:      {"xsave", true},
	X86FeatureAVX:        {"avx", true},
	X86FeatureF16C:       {"f16c", true},
	X86FeatureRDRAND:     {"rdrand", true},
	X86FeatureHypervisor: {"hypervisor", true},
	X86FeatureOSXSAVE:    {"osxsave", false},

	// Block 1.
	X86FeatureFPU:   {"fpu", true},
	X86FeatureVME:   {"vme", true},
	X86FeatureDE:    {"de", true},
	X86FeaturePSE:   {"pse", true},
	X86FeatureTSC:   {"tsc", true},
	X86FeatureMSR:   {"msr", true},
	X86FeaturePAE:   {"pae", true},
	X86FeatureMCE:   {"mce", true},
	X86FeatureCX8:   {"cx8", true},
	X86FeatureAPIC:  {"apic", true},
	X86FeatureSEP:   {"sep", true},
	X86FeatureMTRR:  {"mtrr", true},
	X86FeaturePGE:   {"pge", true},
	X86FeatureMCA:   {"mca", true},
	X86FeatureCMOV:  {"cmov", true},
	X86FeaturePAT:   {"pat", true},
	X86FeaturePSE36: {"pse36", true},
	X86FeaturePSN:   {"pn", true},
	X86FeatureCLFSH: {"clflush", true},
	X86FeatureDS:    {"dts", true},
	X86FeatureACPI:  {"acpi", true},
	X86FeatureMMX:   {"mmx", true},
	X86FeatureFXSR:  {"fxsr", true},
	X86FeatureSSE:   {"sse", true},
	X86FeatureSSE2:  {"sse2", true},
	X86FeatureSS:    {"ss", true},
	X86FeatureHTT:   {"ht", true},
	X86FeatureTM:    {"tm", true},
	X86FeatureIA64:  {"ia64", true},
	X86FeaturePBE:   {"pbe", true},

	// Block 2.
	X86FeatureFSGSBase:        {"fsgsbase", true},
	X86FeatureTSC_ADJUST:      {"tsc_adjust", true},
	X86FeatureBMI1:            {"bmi1", true},
	X86FeatureHLE:             {"hle", true},
	X86FeatureAVX2:            {"avx2", true},
	X86FeatureSMEP:            {"smep", true},
	X86FeatureBMI2:            {"bmi2", true},
	X86FeatureERMS:            {"erms", true},
	X86FeatureINVPCID:         {"invpcid", true},
	X86FeatureRTM:             {"rtm", true},
	X86FeatureCQM:             {"cqm", true},
	X86FeatureMPX:             {"mpx", true},
	X86FeatureRDT:             {"rdt_a", true},
	X86FeatureAVX512F:         {"avx512f", true},
	X86FeatureAVX512DQ:        {"avx512dq", true},
	X86FeatureRDSEED:          {"rdseed", true},
	X86FeatureADX:             {"adx", true},
	X86FeatureSMAP:            {"smap", true},
	X86FeatureCLWB:            {"clwb", true},
	X86FeatureAVX512PF:        {"avx512pf", true},
	X86FeatureAVX512ER:        {"avx512er", true},
	X86FeatureAVX512CD:        {"avx512cd", true},
	X86FeatureSHA:             {"sha_ni", true},
	X86FeatureAVX512BW:        {"avx512bw", true},
	X86FeatureAVX512VL:        {"avx512vl", true},
	X86FeatureFDP_EXCPTN_ONLY: {"fdp_excptn_only", false},
	X86FeatureFPCSDS:          {"fpcsds", false},
	X86FeatureIPT:             {"ipt", false},
	X86FeatureCLFLUSHOPT:      {"clflushopt", false},

	// Block 3.
	X86FeatureAVX512VBMI:       {"avx512vbmi", true},
	X86FeatureUMIP:             {"umip", true},
	X86FeaturePKU:              {"pku", true},
	X86FeatureOSPKE:            {"ospke", true},
	X86FeatureWAITPKG:          {"waitpkg", true},
	X86FeatureAVX512_VBMI2:     {"avx512_vbmi2", true},
	X86FeatureGFNI:             {"gfni", true},
	X86FeatureCET_SS:           {"cet_ss", false},
	X86FeatureVAES:             {"vaes", true},
	X86FeatureVPCLMULQDQ:       {"vpclmulqdq", true},
	X86FeatureAVX512_VNNI:      {"avx512_vnni", true},
	X86FeatureAVX512_BITALG:    {"avx512_bitalg", true},
	X86FeatureTME:              {"tme", true},
	X86FeatureAVX512_VPOPCNTDQ: {"avx512_vpopcntdq", true},
	X86FeatureLA57:             {"la57", true},
	X86FeatureRDPID:            {"rdpid", true},
	X86FeatureCLDEMOTE:         {"cldemote", true},
	X86FeatureMOVDIRI:          {"movdiri", true},
	X86FeatureMOVDIR64B:        {"movdir64b", true},
	X86FeaturePREFETCHWT1:      {"prefetchwt1", false},

	// Block 4.
	X86FeatureXSAVEOPT: {"xsaveopt", true},
	X86FeatureXSAVEC:   {"xsavec", true},
	X86FeatureXGETBV1:  {"xgetbv1", true},
	X86FeatureXSAVES:   {"xsaves", true},

	// Block 5.
	X86FeatureLAHF64:       {"lahf_lm", true}, // LAHF/SAHF in long mode.
	X86FeatureCMP_LEGACY:   {"cmp_legacy", true},
	X86FeatureSVM:          {"svm", true},
	X86FeatureEXTAPIC:      {"extapic", true},
	X86FeatureCR8_LEGACY:   {"cr8_legacy", true},
	X86FeatureLZCNT:        {"abm", true}, // Advanced bit manipulation.
	X86FeatureSSE4A:        {"sse4a", true},
	X86FeatureMISALIGNSSE:  {"misalignsse", true},
	X86FeaturePREFETCHW:    {"3dnowprefetch", true},
	X86FeatureOSVW:         {"osvw", true},
	X86FeatureIBS:          {"ibs", true},
	X86FeatureXOP:          {"xop", true},
	X86FeatureSKINIT:       {"skinit", true},
	X86FeatureWDT:          {"wdt", true},
	X86FeatureLWP:          {"lwp", true},
	X86FeatureFMA4:         {"fma4", true},
	X86FeatureTCE:          {"tce", true},
	X86FeatureTBM:          {"tbm", true},
	X86FeatureTOPOLOGY:     {"topoext", true},
	X86FeaturePERFCTR_CORE: {"perfctr_core", true},
	X86FeaturePERFCTR_NB:   {"perfctr_nb", true},
	X86FeatureBPEXT:        {"bpext", true},
	X86FeaturePERFCTR_TSC:  {"ptsc", true},
	X86FeaturePERFCTR_LLC:  {"perfctr_llc", true},
	X86FeatureMWAITX:       {"mwaitx", true},
	X86FeatureADMSKEXTN:    {"ad_mask_extn", false},

	// Block 6.
	X86FeatureSYSCALL:  {"syscall", true},
	X86FeatureNX:       {"nx", true},
	X86FeatureMMXEXT:   {"mmxext", true},
	X86FeatureFXSR_OPT: {"fxsr_opt", true},
	X86FeatureGBPAGES:  {"pdpe1gb", true},
	X86FeatureRDTSCP:   {"rdtscp", true},
	X86FeatureLM:       {"lm", true},
	X86Feature3DNOWEXT: {"3dnowext", true},
	X86Feature3DNOW:    {"3dnow", true},

	// Block 7.
	X86FeatureAVX512_4VNNIW:       {"avx512_4vnniw", true},
	X86FeatureAVX512_4FMAPS:       {"avx512_4fmaps", true},
	X86FeatureFSRM:                {"fsrm", true},
	X86FeatureAVX512_VP2INTERSECT: {"avx512_vp2intersect", true},
	X86FeatureSRBDS_CTRL:          {"srbds_ctrl", false},
	X86FeatureMD_CLEAR:            {"md_clear", true},
	X86FeatureRTM_ALWAYS_ABORT:    {"rtm_always_abort", false},
	X86FeatureTSX_FORCE_ABORT:     {"tsx_force_abort", false},
	X86FeatureSERIALIZE:           {"serialize", true},
	X86FeatureHYBRID_CPU:          {"hybrid_cpu", false},
	X86FeatureTSXLDTRK:            {"tsxldtrk", true},
	X86FeaturePCONFIG:             {"pconfig", true},
	X86FeatureARCH_LBR:            {"arch_lbr", true},
	X86FeatureIBT:                 {"ibt", true},
	X86FeatureAMX_BF16:            {"amx_bf16", true},
	X86FeatureAVX512_FP16:         {"avx512_fp16", true},
	X86FeatureAMX_TILE:            {"amx_tile", true},
	X86FeatureAMX_INT8:            {"amx_int8", true},
	X86FeatureSPEC_CTRL:           {"spec_ctrl", false},
	X86FeatureINTEL_STIBP:         {"intel_stibp", false},
	X86FeatureFLUSH_L1D:           {"flush_l1d", true},
	X86FeatureARCH_CAPABILITIES:   {"arch_capabilities", true},
	X86FeatureCORE_CAPABILITIES:   {"core_capabilities", false},
	X86FeatureSPEC_CTRL_SSBD:      {"spec_ctrl_ssbd", false},
}

// linuxBlockOrder defines the order in which Linux organizes the feature
// blocks. Linux also tracks feature bits in 32-bit blocks, but in an order
// which doesn't match well here, so for the /proc/cpuinfo generation we simply
// re-map the blocks to Linux's ordering and then go through the bits in each
// block.
var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3, 7}

func archFlagOrder(fn func(Feature)) {
	for _, b := range linuxBlockOrder {
		for i := 0; i < blockSize; i++ {
			f := featureID(b, i)
			if _, ok := allFeatures[f]; ok {
				fn(f)
			}
		}
	}
}
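
// Illustrative sketch, not part of the vendored file: archFlagOrder visits
// features in Linux's block order, which is how a /proc/cpuinfo-style flags
// line would be assembled (the allFeatureInfo field name below is a guess,
// since the struct is defined outside this file):
//
//	var flags []string
//	archFlagOrder(func(f Feature) {
//		flags = append(flags, allFeatures[f].name)
//	})
//	// strings.Join(flags, " ") yields the flags in Linux's order.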
147
vendor/gvisor.dev/gvisor/pkg/cpuid/features_arm64.go
vendored
Normal file
@@ -0,0 +1,147 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm64
// +build arm64

package cpuid

const (
	// ARM64FeatureFP indicates support for single and double precision
	// floating point types.
	ARM64FeatureFP Feature = iota

	// ARM64FeatureASIMD indicates support for Advanced SIMD with single
	// and double precision floating point arithmetic.
	ARM64FeatureASIMD

	// ARM64FeatureEVTSTRM indicates support for the generic timer
	// configured to generate events at a frequency of approximately
	// 100KHz.
	ARM64FeatureEVTSTRM

	// ARM64FeatureAES indicates support for AES instructions
	// (AESE/AESD/AESMC/AESIMC).
	ARM64FeatureAES

	// ARM64FeaturePMULL indicates support for AES instructions
	// (PMULL/PMULL2).
	ARM64FeaturePMULL

	// ARM64FeatureSHA1 indicates support for SHA1 instructions
	// (SHA1C/SHA1P/SHA1M etc).
	ARM64FeatureSHA1

	// ARM64FeatureSHA2 indicates support for SHA2 instructions
	// (SHA256H/SHA256H2/SHA256SU0 etc).
	ARM64FeatureSHA2

	// ARM64FeatureCRC32 indicates support for CRC32 instructions
	// (CRC32B/CRC32H/CRC32W etc).
	ARM64FeatureCRC32

	// ARM64FeatureATOMICS indicates support for atomic instructions
	// (LDADD/LDCLR/LDEOR/LDSET etc).
	ARM64FeatureATOMICS

	// ARM64FeatureFPHP indicates support for half precision floating
	// point arithmetic.
	ARM64FeatureFPHP

	// ARM64FeatureASIMDHP indicates support for ASIMD with half precision
	// floating point arithmetic.
	ARM64FeatureASIMDHP

	// ARM64FeatureCPUID indicates that EL0 access to certain ID registers
	// is available.
	ARM64FeatureCPUID

	// ARM64FeatureASIMDRDM indicates support for SQRDMLAH and SQRDMLSH
	// instructions.
	ARM64FeatureASIMDRDM

	// ARM64FeatureJSCVT indicates support for the FJCVTZS instruction.
	ARM64FeatureJSCVT

	// ARM64FeatureFCMA indicates support for the FCMLA and FCADD
	// instructions.
	ARM64FeatureFCMA

	// ARM64FeatureLRCPC indicates support for the LDAPRB/LDAPRH/LDAPR
	// instructions.
	ARM64FeatureLRCPC

	// ARM64FeatureDCPOP indicates support for the DC instruction (DC CVAP).
	ARM64FeatureDCPOP

	// ARM64FeatureSHA3 indicates support for SHA3 instructions
	// (EOR3/RAX1/XAR/BCAX).
	ARM64FeatureSHA3

	// ARM64FeatureSM3 indicates support for SM3 instructions
	// (SM3SS1/SM3TT1A/SM3TT1B).
	ARM64FeatureSM3

	// ARM64FeatureSM4 indicates support for SM4 instructions
	// (SM4E/SM4EKEY).
	ARM64FeatureSM4

	// ARM64FeatureASIMDDP indicates support for dot product instructions
	// (UDOT/SDOT).
	ARM64FeatureASIMDDP

	// ARM64FeatureSHA512 indicates support for SHA2 instructions
	// (SHA512H/SHA512H2/SHA512SU0).
	ARM64FeatureSHA512

	// ARM64FeatureSVE indicates support for the Scalable Vector Extension.
	ARM64FeatureSVE

	// ARM64FeatureASIMDFHM indicates support for the FMLAL and FMLSL
	// instructions.
	ARM64FeatureASIMDFHM
)

var allFeatures = map[Feature]allFeatureInfo{
	ARM64FeatureFP:       {"fp", true},
	ARM64FeatureASIMD:    {"asimd", true},
	ARM64FeatureEVTSTRM:  {"evtstrm", true},
	ARM64FeatureAES:      {"aes", true},
	ARM64FeaturePMULL:    {"pmull", true},
	ARM64FeatureSHA1:     {"sha1", true},
	ARM64FeatureSHA2:     {"sha2", true},
	ARM64FeatureCRC32:    {"crc32", true},
	ARM64FeatureATOMICS:  {"atomics", true},
	ARM64FeatureFPHP:     {"fphp", true},
	ARM64FeatureASIMDHP:  {"asimdhp", true},
	ARM64FeatureCPUID:    {"cpuid", true},
	ARM64FeatureASIMDRDM: {"asimdrdm", true},
	ARM64FeatureJSCVT:    {"jscvt", true},
	ARM64FeatureFCMA:     {"fcma", true},
	ARM64FeatureLRCPC:    {"lrcpc", true},
	ARM64FeatureDCPOP:    {"dcpop", true},
	ARM64FeatureSHA3:     {"sha3", true},
	ARM64FeatureSM3:      {"sm3", true},
	ARM64FeatureSM4:      {"sm4", true},
	ARM64FeatureASIMDDP:  {"asimddp", true},
	ARM64FeatureSHA512:   {"sha512", true},
	ARM64FeatureSVE:      {"sve", true},
	ARM64FeatureASIMDFHM: {"asimdfhm", true},
}

func archFlagOrder(fn func(Feature)) {
	for i := 0; i < len(allFeatures); i++ {
		fn(Feature(i))
	}
}
229
vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go
vendored
Normal file
@@ -0,0 +1,229 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

package cpuid

import (
	"io/ioutil"
	"strconv"
	"strings"

	"gvisor.dev/gvisor/pkg/log"
)

// cpuidFunction is a useful type wrapper. The format is eax | (ecx << 32).
type cpuidFunction uint64

func (f cpuidFunction) eax() uint32 {
	return uint32(f)
}

func (f cpuidFunction) ecx() uint32 {
	return uint32(f >> 32)
}
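
// Illustrative check, not part of the vendored file: the packing round-trips
// for sub-leaf functions such as xSaveInfoSub, defined below:
//
//	const fn cpuidFunction = 0xd | (0x1 << 32) // i.e. xSaveInfoSub.
//	// fn.eax() == 0xd (the leaf), fn.ecx() == 0x1 (the sub-leaf).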

// The constants below are the lower or "standard" cpuid functions, ordered as
// defined by the hardware. Note that these may not be included in the standard
// set of functions that we are allowed to execute, which are filtered in the
// Native.Query function defined below.
const (
	vendorID                      cpuidFunction = 0x0 // Returns vendor ID and largest standard function.
	featureInfo                   cpuidFunction = 0x1 // Returns basic feature bits and processor signature.
	intelCacheDescriptors         cpuidFunction = 0x2 // Returns list of cache descriptors. Intel only.
	intelSerialNumber             cpuidFunction = 0x3 // Returns processor serial number (obsolete on new hardware). Intel only.
	intelDeterministicCacheParams cpuidFunction = 0x4 // Returns deterministic cache information. Intel only.
	monitorMwaitParams            cpuidFunction = 0x5 // Returns information about monitor/mwait instructions.
	powerParams                   cpuidFunction = 0x6 // Returns information about power management and thermal sensors.
	extendedFeatureInfo           cpuidFunction = 0x7 // Returns extended feature bits.
	_                                                 // Function 0x8 is reserved.
	intelDCAParams                cpuidFunction = 0x9 // Returns direct cache access information. Intel only.
	intelPMCInfo                  cpuidFunction = 0xa // Returns information about performance monitoring features. Intel only.
	intelX2APICInfo               cpuidFunction = 0xb // Returns core/logical processor topology. Intel only.
	_                                                 // Function 0xc is reserved.
	xSaveInfo                     cpuidFunction = 0xd               // Returns information about extended state management.
	xSaveInfoSub                  cpuidFunction = 0xd | (0x1 << 32) // Returns information about extended state management (sub-leaf).
)

const xSaveInfoNumLeaves = 64 // Maximum number of xSaveInfo leaves.

// The "extended" functions.
const (
	extendedStart         cpuidFunction = 0x80000000
	extendedFunctionInfo  cpuidFunction = extendedStart + 0 // Returns highest available extended function in eax.
	extendedFeatures                    = extendedStart + 1 // Returns some extended feature bits in edx and ecx.
	processorBrandString2               = extendedStart + 2 // Processor Name String Identifier.
	processorBrandString3               = extendedStart + 3 // Processor Name String Identifier.
	processorBrandString4               = extendedStart + 4 // Processor Name String Identifier.
	l1CacheAndTLBInfo                   = extendedStart + 5 // Returns L1 cache and TLB information.
	l2CacheInfo                         = extendedStart + 6 // Returns L2 cache information.
	addressSizes                        = extendedStart + 8 // Physical and virtual address sizes.
)

var allowedBasicFunctions = [...]bool{
	vendorID:                      true,
	featureInfo:                   true,
	extendedFeatureInfo:           true,
	intelCacheDescriptors:         true,
	intelDeterministicCacheParams: true,
	xSaveInfo:                     true,
}

var allowedExtendedFunctions = [...]bool{
	extendedFunctionInfo - extendedStart:  true,
	extendedFeatures - extendedStart:      true,
	addressSizes - extendedStart:          true,
	processorBrandString2 - extendedStart: true,
	processorBrandString3 - extendedStart: true,
	processorBrandString4 - extendedStart: true,
	l1CacheAndTLBInfo - extendedStart:     true,
	l2CacheInfo - extendedStart:           true,
}

// Function executes a CPUID function.
//
// This is typically the native function or a Static definition.
type Function interface {
	Query(In) Out
}

// Native is a native Function.
//
// This implements Function.
type Native struct{}

// In is input to the Query function.
//
// +stateify savable
type In struct {
	Eax uint32
	Ecx uint32
}

// normalize drops irrelevant Ecx values.
func (i *In) normalize() {
	switch cpuidFunction(i.Eax) {
	case vendorID, featureInfo, intelCacheDescriptors, extendedFunctionInfo, extendedFeatures:
		i.Ecx = 0 // Ignore.
	case processorBrandString2, processorBrandString3, processorBrandString4, l1CacheAndTLBInfo, l2CacheInfo:
		i.Ecx = 0 // Ignore.
	case intelDeterministicCacheParams, extendedFeatureInfo:
		// Preserve i.Ecx.
	}
}
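
// Illustrative sketch, not part of the vendored file: normalize canonicalizes
// inputs so that leaves which ignore Ecx map to a single key, e.g. when an In
// is used to look up a Static map entry:
//
//	a := In{Eax: uint32(featureInfo), Ecx: 7}
//	a.normalize()
//	// a == In{Eax: uint32(featureInfo), Ecx: 0}, matching the saved key.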

// Out is output from the Query function.
//
// +stateify savable
type Out struct {
	Eax uint32
	Ebx uint32
	Ecx uint32
	Edx uint32
}

// native is the native Query function.
func native(In) Out

// Query executes CPUID natively.
//
// This implements Function.
//
//go:nosplit
func (*Native) Query(in In) Out {
	if int(in.Eax) < len(allowedBasicFunctions) && allowedBasicFunctions[in.Eax] {
		return native(in)
	} else if in.Eax >= uint32(extendedStart) {
		if l := int(in.Eax - uint32(extendedStart)); l < len(allowedExtendedFunctions) && allowedExtendedFunctions[l] {
			return native(in)
		}
	}
	return Out{} // All zeros.
}

// query is an internal wrapper.
//
//go:nosplit
func (fs FeatureSet) query(fn cpuidFunction) (uint32, uint32, uint32, uint32) {
	out := fs.Query(In{Eax: fn.eax(), Ecx: fn.ecx()})
	return out.Eax, out.Ebx, out.Ecx, out.Edx
}

var hostFeatureSet FeatureSet

// HostFeatureSet returns a host CPUID.
//
//go:nosplit
func HostFeatureSet() FeatureSet {
	return hostFeatureSet
}

var (
	// cpuFreqMHz is the native CPU frequency.
	cpuFreqMHz float64
)

// readMaxCPUFreq reads the max CPU frequency from the host /proc/cpuinfo.
// It must run before syscall filter installation. This value is used to
// create the fake /proc/cpuinfo from a FeatureSet.
func readMaxCPUFreq() {
	cpuinfob, err := ioutil.ReadFile("/proc/cpuinfo")
	if err != nil {
		// Leave it as 0... the VDSO bails out in the same way.
		log.Warningf("Could not read /proc/cpuinfo: %v", err)
		return
	}
	cpuinfo := string(cpuinfob)

	// We get the value straight from host /proc/cpuinfo. On machines with
	// frequency scaling enabled, this will only get the current value
	// which will likely be inaccurate. This is fine on machines with
	// frequency scaling disabled.
	for _, line := range strings.Split(cpuinfo, "\n") {
		if strings.Contains(line, "cpu MHz") {
			splitMHz := strings.Split(line, ":")
			if len(splitMHz) < 2 {
				log.Warningf("Could not read /proc/cpuinfo: malformed cpu MHz line")
				return
			}

			// If there was a problem, leave cpuFreqMHz as 0.
			var err error
			cpuFreqMHz, err = strconv.ParseFloat(strings.TrimSpace(splitMHz[1]), 64)
			if err != nil {
				log.Warningf("Could not parse cpu MHz value %v: %v", splitMHz[1], err)
				cpuFreqMHz = 0
				return
			}
			return
		}
	}
	log.Warningf("Could not parse /proc/cpuinfo, it is empty or does not contain cpu MHz")
}

// xgetbv reads an extended control register.
func xgetbv(reg uintptr) uint64
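
// Illustrative sketch, not part of the vendored file: register 0 is XCR0, so
// the OS-enabled user state mask can be read and decomposed with the
// XSAVEFeature* constants from features_amd64.go:
//
//	xcr0 := xgetbv(0)
//	_ = xcr0&XSAVEFeatureSSE != 0 // xmm state enabled.
//	_ = xcr0&XSAVEFeatureAVX != 0 // ymm upper halves enabled.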

// archInitialize initializes hostFeatureSet.
func archInitialize() {
	hostFeatureSet = FeatureSet{
		Function: &Native{},
	}.Fixed()

	readMaxCPUFreq()
	initHWCap()
}
38
vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s
vendored
Normal file
@@ -0,0 +1,38 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "textflag.h"

TEXT ·native(SB),NOSPLIT|NOFRAME,$0-24
	MOVL arg_Eax+0(FP), AX
	MOVL arg_Ecx+4(FP), CX
	CPUID
	MOVL AX, ret_Eax+8(FP)
	MOVL BX, ret_Ebx+12(FP)
	MOVL CX, ret_Ecx+16(FP)
	MOVL DX, ret_Edx+20(FP)
	RET

// xgetbv reads an extended control register.
//
// The code corresponds to:
//
//	xgetbv
//
TEXT ·xgetbv(SB),NOSPLIT|NOFRAME,$0-16
	MOVQ reg+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0; // XGETBV opcode bytes.
	MOVL AX, ret+8(FP)  // Low 32 bits of the edx:eax result.
	MOVL DX, ret+12(FP) // High 32 bits.
	RET
157
vendor/gvisor.dev/gvisor/pkg/cpuid/native_arm64.go
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright 2019 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build arm64
|
||||
// +build arm64
|
||||
|
||||
package cpuid
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/log"
|
||||
)
|
||||
|
||||
// hostFeatureSet is initialized at startup.
|
||||
//
|
||||
// This is copied for HostFeatureSet, below.
|
||||
var hostFeatureSet FeatureSet
|
||||
|
||||
// HostFeatureSet returns a copy of the host FeatureSet.
|
||||
func HostFeatureSet() FeatureSet {
|
||||
return hostFeatureSet
|
||||
}
|
||||
|
||||
// Fixed returns the same feature set.
|
||||
func (fs FeatureSet) Fixed() FeatureSet {
|
||||
return fs
|
||||
}
|
||||
|
||||
// Reads CPU information from host /proc/cpuinfo.
|
||||
//
|
||||
// Must run before syscall filter installation. This value is used to create
// the fake /proc/cpuinfo from a FeatureSet.
func initCPUInfo() {
    if runtime.GOOS != "linux" {
        // Don't try to read Linux-specific /proc files or
        // warn about them not existing.
        return
    }
    cpuinfob, err := ioutil.ReadFile("/proc/cpuinfo")
    if err != nil {
        // Leave everything at 0, nothing can be done.
        log.Warningf("Could not read /proc/cpuinfo: %v", err)
        return
    }
    cpuinfo := string(cpuinfob)

    // We get the value straight from host /proc/cpuinfo.
    for _, line := range strings.Split(cpuinfo, "\n") {
        switch {
        case strings.Contains(line, "BogoMIPS"):
            splitMHz := strings.Split(line, ":")
            if len(splitMHz) < 2 {
                log.Warningf("Could not read /proc/cpuinfo: malformed BogoMIPS")
                break
            }

            // If there was a problem, leave cpuFreqMHz as 0.
            var err error
            hostFeatureSet.cpuFreqMHz, err = strconv.ParseFloat(strings.TrimSpace(splitMHz[1]), 64)
            if err != nil {
                hostFeatureSet.cpuFreqMHz = 0.0
                log.Warningf("Could not parse BogoMIPS value %v: %v", splitMHz[1], err)
            }
        case strings.Contains(line, "CPU implementer"):
            splitImpl := strings.Split(line, ":")
            if len(splitImpl) < 2 {
                log.Warningf("Could not read /proc/cpuinfo: malformed CPU implementer")
                break
            }

            // If there was a problem, leave cpuImplHex as 0.
            var err error
            hostFeatureSet.cpuImplHex, err = strconv.ParseUint(strings.TrimSpace(splitImpl[1]), 0, 64)
            if err != nil {
                hostFeatureSet.cpuImplHex = 0
                log.Warningf("Could not parse CPU implementer value %v: %v", splitImpl[1], err)
            }
        case strings.Contains(line, "CPU architecture"):
            splitArch := strings.Split(line, ":")
            if len(splitArch) < 2 {
                log.Warningf("Could not read /proc/cpuinfo: malformed CPU architecture")
                break
            }

            // If there was a problem, leave cpuArchDec as 0.
            var err error
            hostFeatureSet.cpuArchDec, err = strconv.ParseUint(strings.TrimSpace(splitArch[1]), 0, 64)
            if err != nil {
                hostFeatureSet.cpuArchDec = 0
                log.Warningf("Could not parse CPU architecture value %v: %v", splitArch[1], err)
            }
        case strings.Contains(line, "CPU variant"):
            splitVar := strings.Split(line, ":")
            if len(splitVar) < 2 {
                log.Warningf("Could not read /proc/cpuinfo: malformed CPU variant")
                break
            }

            // If there was a problem, leave cpuVarHex as 0.
            var err error
            hostFeatureSet.cpuVarHex, err = strconv.ParseUint(strings.TrimSpace(splitVar[1]), 0, 64)
            if err != nil {
                hostFeatureSet.cpuVarHex = 0
                log.Warningf("Could not parse CPU variant value %v: %v", splitVar[1], err)
            }
        case strings.Contains(line, "CPU part"):
            splitPart := strings.Split(line, ":")
            if len(splitPart) < 2 {
                log.Warningf("Could not read /proc/cpuinfo: malformed CPU part")
                break
            }

            // If there was a problem, leave cpuPartHex as 0.
            var err error
            hostFeatureSet.cpuPartHex, err = strconv.ParseUint(strings.TrimSpace(splitPart[1]), 0, 64)
            if err != nil {
                hostFeatureSet.cpuPartHex = 0
                log.Warningf("Could not parse CPU part value %v: %v", splitPart[1], err)
            }
        case strings.Contains(line, "CPU revision"):
            splitRev := strings.Split(line, ":")
            if len(splitRev) < 2 {
                log.Warningf("Could not read /proc/cpuinfo: malformed CPU revision")
                break
            }

            // If there was a problem, leave cpuRevDec as 0.
            var err error
            hostFeatureSet.cpuRevDec, err = strconv.ParseUint(strings.TrimSpace(splitRev[1]), 0, 64)
            if err != nil {
                hostFeatureSet.cpuRevDec = 0
                log.Warningf("Could not parse CPU revision value %v: %v", splitRev[1], err)
            }
        }
    }
}

// archInitialize initializes hostFeatureSet.
func archInitialize() {
    initCPUInfo()
    initHWCap()
}
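For illustration, the parsers above rely on strconv's base-0 mode, which is what lets hex-prefixed fields such as a CPU implementer of 0x41 come through correctly. A minimal, self-contained sketch of that split-and-parse step (the cpuinfo line is a made-up example, not taken from this diff):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // Hypothetical /proc/cpuinfo line; base 0 lets ParseUint accept the 0x prefix.
    line := "CPU implementer : 0x41"
    parts := strings.Split(line, ":")
    if len(parts) < 2 {
        return
    }
    impl, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 0, 64)
    fmt.Println(impl, err) // 65 <nil>
}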
133
vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go
vendored
Normal file
@@ -0,0 +1,133 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

package cpuid

import "context"

// Static is a static CPUID function.
//
// +stateify savable
type Static map[In]Out

// Fixed converts the FeatureSet to a fixed set.
func (fs FeatureSet) Fixed() FeatureSet {
    return fs.ToStatic().ToFeatureSet()
}

// ToStatic converts a FeatureSet to a Static function.
//
// You can create a new static feature set as:
//
//     fs := otherFeatureSet.ToStatic().ToFeatureSet()
func (fs FeatureSet) ToStatic() Static {
    s := make(Static)

    // Save all allowed top-level functions.
    for fn, allowed := range allowedBasicFunctions {
        if allowed {
            in := In{Eax: uint32(fn)}
            s[in] = fs.Query(in)
        }
    }

    // Save all allowed extended functions.
    for fn, allowed := range allowedExtendedFunctions {
        if allowed {
            in := In{Eax: uint32(fn) + uint32(extendedStart)}
            s[in] = fs.Query(in)
        }
    }

    // Save all features (may be redundant).
    for feature := range allFeatures {
        feature.set(s, fs.HasFeature(feature))
    }

    // Processor Extended State Enumeration.
    for i := uint32(0); i < xSaveInfoNumLeaves; i++ {
        in := In{Eax: uint32(xSaveInfo), Ecx: i}
        s[in] = fs.Query(in)
    }

    // Save all cache information.
    out := fs.Query(In{Eax: uint32(featureInfo)})
    for i := uint32(0); i < out.Ecx; i++ {
        in := In{Eax: uint32(intelDeterministicCacheParams), Ecx: i}
        out := fs.Query(in)
        s[in] = out
        if CacheType(out.Eax&0xf) == cacheNull {
            break
        }
    }

    return s
}

// ToFeatureSet converts a static specification to a FeatureSet.
//
// This overloads some local values, where required.
func (s Static) ToFeatureSet() FeatureSet {
    // Make a copy.
    ns := make(Static)
    for k, v := range s {
        ns[k] = v
    }
    ns.normalize()
    return FeatureSet{ns, hwCap{}}
}

// afterLoad calls normalize.
func (s Static) afterLoad(context.Context) {
    s.normalize()
}

// normalize normalizes FPU sizes.
func (s Static) normalize() {
    // Override local FPU sizes, which must be fixed.
    fs := FeatureSet{s, hwCap{}}
    if fs.HasFeature(X86FeatureXSAVE) {
        in := In{Eax: uint32(xSaveInfo)}
        out := s[in]
        out.Ecx = maxXsaveSize
        out.Ebx = xsaveSize
        s[in] = out
    }
}

// Add adds a feature.
func (s Static) Add(feature Feature) Static {
    feature.set(s, true)
    return s
}

// Remove removes a feature.
func (s Static) Remove(feature Feature) Static {
    feature.set(s, false)
    return s
}

// Set implements ChangeableSet.Set.
func (s Static) Set(in In, out Out) {
    s[in] = out
}

// Query implements Function.Query.
func (s Static) Query(in In) Out {
    in.normalize()
    return s[in]
}
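As a usage sketch (not part of this diff), the conversions above compose: a caller holding a FeatureSet can freeze it, mask a feature, and convert back. withoutXSAVE is a hypothetical helper name:

// Hypothetical helper inside pkg/cpuid: derive a fixed feature set with
// XSAVE masked out, leaving the original set untouched.
func withoutXSAVE(fs FeatureSet) FeatureSet {
    return fs.ToStatic().Remove(X86FeatureXSAVE).ToFeatureSet()
}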
51
vendor/gvisor.dev/gvisor/pkg/gohacks/linkname_go113_unsafe.go
vendored
Normal file
@@ -0,0 +1,51 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.13

// //go:linkname directives type-checked by checklinkname. Any other
// non-linkname assumptions outside the Go 1 compatibility guarantee should
// have an accompanied vet check or version guard build tag.

// Package gohacks contains utilities for subverting the Go compiler.
package gohacks

import (
    "unsafe"
)

// Note that go:linkname silently doesn't work if the local name is exported,
// necessitating an indirection for exported functions.

// Memmove is runtime.memmove, exported for SeqAtomicLoad/SeqAtomicTryLoad<T>.
//
//go:nosplit
func Memmove(to, from unsafe.Pointer, n uintptr) {
    memmove(to, from, n)
}

//go:linkname memmove runtime.memmove
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

// Nanotime is runtime.nanotime.
//
//go:nosplit
func Nanotime() int64 {
    return nanotime()
}

//go:linkname nanotime runtime.nanotime
//go:noescape
func nanotime() int64
34
vendor/gvisor.dev/gvisor/pkg/gohacks/noescape_unsafe.go
vendored
Normal file
@@ -0,0 +1,34 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gohacks

import (
    "unsafe"
)

// Noescape hides a pointer from escape analysis. Noescape is the identity
// function but escape analysis doesn't think the output depends on the input.
// Noescape is inlined and currently compiles down to zero instructions.
// USE CAREFULLY!
//
// Noescape is copy/pasted from Go's runtime/stubs.go:noescape(), and is valid
// as of Go 1.20. It is possible that this approach stops working in future
// versions of the toolchain, at which point `p` may still escape.
//
//go:nosplit
func Noescape(p unsafe.Pointer) unsafe.Pointer {
    x := uintptr(p)
    return unsafe.Pointer(x ^ 0)
}
45
vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go113_unsafe.go
vendored
Normal file
@@ -0,0 +1,45 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.13 && !go1.20
// +build go1.13,!go1.20

// TODO(go.dev/issue/8422): Remove this once Go 1.19 is no longer supported,
// and update callers to use unsafe.Slice directly.

package gohacks

import (
    "unsafe"
)

// sliceHeader is equivalent to reflect.SliceHeader, but represents the pointer
// to the underlying array as unsafe.Pointer rather than uintptr, allowing
// sliceHeaders to be directly converted to slice objects.
type sliceHeader struct {
    Data unsafe.Pointer
    Len  int
    Cap  int
}

// Slice returns a slice whose underlying array starts at ptr and whose length
// and capacity are length.
func Slice[T any](ptr *T, length int) []T {
    var s []T
    hdr := (*sliceHeader)(unsafe.Pointer(&s))
    hdr.Data = unsafe.Pointer(ptr)
    hdr.Len = length
    hdr.Cap = length
    return s
}
30
vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go120_unsafe.go
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.20

package gohacks

import (
    "unsafe"
)

// Slice returns a slice whose underlying array starts at ptr and whose length
// and capacity are length.
//
// Slice is a wrapper around unsafe.Slice. Prefer to use unsafe.Slice directly
// if possible.
func Slice[T any](ptr *T, length int) []T {
    return unsafe.Slice(ptr, length)
}
51
vendor/gvisor.dev/gvisor/pkg/gohacks/string_go113_unsafe.go
vendored
Normal file
@@ -0,0 +1,51 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.13 && !go1.20
// +build go1.13,!go1.20

// TODO(go.dev/issue/8422): Remove this file once Go 1.19 is no longer
// supported.

package gohacks

import (
    "unsafe"
)

// stringHeader is equivalent to reflect.StringHeader, but represents the
// pointer to the underlying array as unsafe.Pointer rather than uintptr,
// allowing StringHeaders to be directly converted to strings.
type stringHeader struct {
    Data unsafe.Pointer
    Len  int
}

// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the
// same memory backing s instead of making a heap-allocated copy. This is only
// valid if the returned slice is never mutated.
func ImmutableBytesFromString(s string) []byte {
    shdr := (*stringHeader)(unsafe.Pointer(&s))
    return Slice((*byte)(shdr.Data), shdr.Len)
}

// StringFromImmutableBytes is equivalent to string(bs), except that it uses
// the same memory backing bs instead of making a heap-allocated copy. This is
// only valid if bs is never mutated after StringFromImmutableBytes returns.
func StringFromImmutableBytes(bs []byte) string {
    // This is cheaper than messing with StringHeader and SliceHeader, which as
    // of this writing produces many dead stores of zeroes. Compare
    // strings.Builder.String().
    return *(*string)(unsafe.Pointer(&bs))
}
39
vendor/gvisor.dev/gvisor/pkg/gohacks/string_go120_unsafe.go
vendored
Normal file
@@ -0,0 +1,39 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.20

package gohacks

import (
    "unsafe"
)

// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the
// same memory backing s instead of making a heap-allocated copy. This is only
// valid if the returned slice is never mutated.
func ImmutableBytesFromString(s string) []byte {
    b := unsafe.StringData(s)
    return unsafe.Slice(b, len(s))
}

// StringFromImmutableBytes is equivalent to string(bs), except that it uses
// the same memory backing bs instead of making a heap-allocated copy. This is
// only valid if bs is never mutated after StringFromImmutableBytes returns.
func StringFromImmutableBytes(bs []byte) string {
    if len(bs) == 0 {
        return ""
    }
    return unsafe.String(&bs[0], len(bs))
}
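The Go 1.13 and Go 1.20 variants above share one contract: the result aliases the input's memory. A small caller-side sketch of what that permits and forbids (illustrative only):

package main

import (
    "fmt"

    "gvisor.dev/gvisor/pkg/gohacks"
)

func main() {
    buf := []byte("hello")
    s := gohacks.StringFromImmutableBytes(buf) // shares buf's memory, no copy
    fmt.Println(s)
    // Mutating buf here would also change s; the contract forbids it.

    b := gohacks.ImmutableBytesFromString("world") // shares the string's memory
    fmt.Println(len(b))                            // 5; b must never be written to
}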
28
vendor/gvisor.dev/gvisor/pkg/goid/goid.go
vendored
Normal file
@@ -0,0 +1,28 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package goid provides the Get function.
package goid

import (
    _ "runtime" // For facts in assembly files.
)

// goid returns the current goroutine's ID; it is defined in assembly.
func goid() int64

// Get returns the ID of the current goroutine.
func Get() int64 {
    return goid()
}
26
vendor/gvisor.dev/gvisor/pkg/goid/goid_122_amd64.s
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.23

#include "textflag.h"

#define GOID_OFFSET 152 // +checkoffset runtime g.goid

// func goid() int64
TEXT ·goid(SB),NOSPLIT|NOFRAME,$0-8
    MOVQ (TLS), R14
    MOVQ GOID_OFFSET(R14), R14
    MOVQ R14, ret+0(FP)
    RET
26
vendor/gvisor.dev/gvisor/pkg/goid/goid_122_arm64.s
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.23

#include "textflag.h"

#define GOID_OFFSET 152 // +checkoffset runtime g.goid

// func goid() int64
TEXT ·goid(SB),NOSPLIT,$0-8
    MOVD g, R0 // g
    MOVD GOID_OFFSET(R0), R0
    MOVD R0, ret+0(FP)
    RET
26
vendor/gvisor.dev/gvisor/pkg/goid/goid_123_amd64.s
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.23

#include "textflag.h"

#define GOID_OFFSET 160 // +checkoffset runtime g.goid

// func goid() int64
TEXT ·goid(SB),NOSPLIT|NOFRAME,$0-8
    MOVQ (TLS), R14
    MOVQ GOID_OFFSET(R14), R14
    MOVQ R14, ret+0(FP)
    RET
26
vendor/gvisor.dev/gvisor/pkg/goid/goid_123_arm64.s
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.23

#include "textflag.h"

#define GOID_OFFSET 160 // +checkoffset runtime g.goid

// func goid() int64
TEXT ·goid(SB),NOSPLIT,$0-8
    MOVD g, R0 // g
    MOVD GOID_OFFSET(R0), R0
    MOVD R0, ret+0(FP)
    RET
79
vendor/gvisor.dev/gvisor/pkg/linewriter/linewriter.go
vendored
Normal file
@@ -0,0 +1,79 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package linewriter provides an io.Writer which calls an emitter on each line.
package linewriter

import (
    "bytes"

    "gvisor.dev/gvisor/pkg/sync"
)

// Writer is an io.Writer which buffers input, flushing
// individual lines through an emitter function.
type Writer struct {
    // the mutex locks buf.
    sync.Mutex

    // buf holds the data we haven't emitted yet.
    buf bytes.Buffer

    // emit is used to flush individual lines.
    emit func(p []byte)
}

// NewWriter creates a Writer which emits using emitter.
// The emitter must not retain p. It may change after emitter returns.
func NewWriter(emitter func(p []byte)) *Writer {
    return &Writer{emit: emitter}
}

// Write implements io.Writer.Write.
// It calls emit on each line of input, not including the newline.
// Write may be called concurrently.
func (w *Writer) Write(p []byte) (int, error) {
    w.Lock()
    defer w.Unlock()

    total := 0
    for len(p) > 0 {
        emit := true
        i := bytes.IndexByte(p, '\n')
        if i < 0 {
            // No newline, we will buffer everything.
            i = len(p)
            emit = false
        }

        n, err := w.buf.Write(p[:i])
        if err != nil {
            return total, err
        }
        total += n

        p = p[i:]

        if emit {
            // Skip the newline, but still count it.
            p = p[1:]
            total++

            w.emit(w.buf.Bytes())
            w.buf.Reset()
        }
    }

    return total, nil
}
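A caller-side sketch (illustrative, not from this diff): because the emitter must not retain p, a collector copies each line before keeping it, and bytes without a trailing newline stay buffered:

package main

import (
    "fmt"

    "gvisor.dev/gvisor/pkg/linewriter"
)

func main() {
    var lines []string
    w := linewriter.NewWriter(func(p []byte) {
        // p is only valid during this call, so take a copy.
        lines = append(lines, string(p))
    })
    fmt.Fprintf(w, "first\nsecond\npart") // "part" has no newline yet, so it stays buffered
    fmt.Println(lines)                    // [first second]
}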
86
vendor/gvisor.dev/gvisor/pkg/log/glog.go
vendored
Normal file
@@ -0,0 +1,86 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package log

import (
    "fmt"
    "os"
    "runtime"
    "strings"
    "time"
)

// GoogleEmitter is a wrapper that emits logs in a format compatible with
// package github.com/golang/glog.
type GoogleEmitter struct {
    *Writer
}

// pid is used for the threadid component of the header.
var pid = os.Getpid()

// Emit emits the message, google-style.
//
// Log lines have this form:
//
//     Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
//
// where the fields are defined as follows:
//
//     L               A single character, representing the log level (eg 'I' for INFO)
//     mm              The month (zero padded; ie May is '05')
//     dd              The day (zero padded)
//     hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
//     threadid        The space-padded thread ID as returned by GetTID()
//     file            The file name
//     line            The line number
//     msg             The user-supplied message
func (g GoogleEmitter) Emit(depth int, level Level, timestamp time.Time, format string, args ...any) {
    // Log level.
    prefix := byte('?')
    switch level {
    case Debug:
        prefix = byte('D')
    case Info:
        prefix = byte('I')
    case Warning:
        prefix = byte('W')
    }

    // Timestamp.
    _, month, day := timestamp.Date()
    hour, minute, second := timestamp.Clock()
    microsecond := int(timestamp.Nanosecond() / 1000)

    // 0 = this frame.
    _, file, line, ok := runtime.Caller(depth + 1)
    if ok {
        // Trim any directory path from the file.
        slash := strings.LastIndexByte(file, byte('/'))
        if slash >= 0 {
            file = file[slash+1:]
        }
    } else {
        // We don't have a filename.
        file = "???"
        line = 0
    }

    // Generate the message.
    message := fmt.Sprintf(format, args...)

    // Emit the formatted result.
    fmt.Fprintf(g.Writer, "%c%02d%02d %02d:%02d:%02d.%06d % 7d %s:%d] %s\n", prefix, int(month), day, hour, minute, second, microsecond, pid, file, line, message)
}
85
vendor/gvisor.dev/gvisor/pkg/log/json.go
vendored
Normal file
@@ -0,0 +1,85 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package log

import (
    "encoding/json"
    "fmt"
    "runtime"
    "strings"
    "time"
)

type jsonLog struct {
    Msg   string    `json:"msg"`
    Level Level     `json:"level"`
    Time  time.Time `json:"time"`
}

// MarshalJSON implements json.Marshaler.MarshalJSON.
func (l Level) MarshalJSON() ([]byte, error) {
    switch l {
    case Warning:
        return []byte(`"warning"`), nil
    case Info:
        return []byte(`"info"`), nil
    case Debug:
        return []byte(`"debug"`), nil
    default:
        return nil, fmt.Errorf("unknown level %v", l)
    }
}

// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON. It can unmarshal
// from both string names and integers.
func (l *Level) UnmarshalJSON(b []byte) error {
    switch s := string(b); s {
    case "0", `"warning"`:
        *l = Warning
    case "1", `"info"`:
        *l = Info
    case "2", `"debug"`:
        *l = Debug
    default:
        return fmt.Errorf("unknown level %q", s)
    }
    return nil
}

// JSONEmitter logs messages in json format.
type JSONEmitter struct {
    *Writer
}

// Emit implements Emitter.Emit.
func (e JSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
    logLine := fmt.Sprintf(format, v...)
    if _, file, line, ok := runtime.Caller(depth + 1); ok {
        if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 {
            file = file[slash+1:] // Trim any directory path from the file.
        }
        logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine)
    }
    j := jsonLog{
        Msg:   logLine,
        Level: level,
        Time:  timestamp,
    }
    b, err := json.Marshal(j)
    if err != nil {
        panic(err)
    }
    e.Writer.Write(b)
}
56
vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package log

import (
    "encoding/json"
    "fmt"
    "runtime"
    "strings"
    "time"
)

type k8sJSONLog struct {
    Log   string    `json:"log"`
    Level Level     `json:"level"`
    Time  time.Time `json:"time"`
}

// K8sJSONEmitter logs messages in json format that is compatible with
// Kubernetes fluent configuration.
type K8sJSONEmitter struct {
    *Writer
}

// Emit implements Emitter.Emit.
func (e K8sJSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
    logLine := fmt.Sprintf(format, v...)
    if _, file, line, ok := runtime.Caller(depth + 1); ok {
        if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 {
            file = file[slash+1:] // Trim any directory path from the file.
        }
        logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine)
    }
    j := k8sJSONLog{
        Log:   logLine,
        Level: level,
        Time:  timestamp,
    }
    b, err := json.Marshal(j)
    if err != nil {
        panic(err)
    }
    e.Writer.Write(b)
}
399
vendor/gvisor.dev/gvisor/pkg/log/log.go
vendored
Normal file
@@ -0,0 +1,399 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package log implements a library for logging.
//
// This is separate from the standard logging package because logging may be a
// high-impact activity, and therefore we wanted to provide as much flexibility
// as possible in the underlying implementation.
//
// Note that logging should still be considered high-impact, and should not be
// done in the hot path. If necessary, logging statements should be protected
// with guards regarding the logging level. For example,
//
//     if log.IsLogging(log.Debug) {
//         log.Debugf(...)
//     }
//
// This is because the log.Debugf(...) statement alone will generate a
// significant amount of garbage and churn in many cases, even if no log
// message is ultimately emitted.
//
// +checkalignedignore
package log

import (
    "fmt"
    "io"
    stdlog "log"
    "os"
    "regexp"
    "runtime"
    "sync/atomic"
    "time"

    "gvisor.dev/gvisor/pkg/linewriter"
    "gvisor.dev/gvisor/pkg/sync"
)

// Level is the log level.
type Level uint32

// The following levels are fixed, and can never be changed. Since some control
// RPCs allow for changing the level as an integer, it is only possible to add
// additional levels, and the existing ones cannot be removed.
const (
    // Warning indicates that output should always be emitted.
    Warning Level = iota

    // Info indicates that output should normally be emitted.
    Info

    // Debug indicates that output should not normally be emitted.
    Debug
)

func (l Level) String() string {
    switch l {
    case Warning:
        return "Warning"
    case Info:
        return "Info"
    case Debug:
        return "Debug"
    default:
        return fmt.Sprintf("Invalid level: %d", l)
    }
}

// Emitter is the final destination for logs.
type Emitter interface {
    // Emit emits the given log statement. This allows for control over the
    // timestamp used for logging.
    Emit(depth int, level Level, timestamp time.Time, format string, v ...any)
}

// Writer writes the output to the given writer.
type Writer struct {
    // Next is where output is written.
    Next io.Writer

    // mu protects fields below.
    mu sync.Mutex

    // atomicErrors counts failures to write log messages so they can be
    // reported when the writer starts to work again. It needs to be accessed
    // using atomics to make the race detector happy because it's read outside
    // the mutex.
    // +checklocks
    atomicErrors int32
}

// Write writes out the given bytes, handling non-blocking sockets.
func (l *Writer) Write(data []byte) (int, error) {
    n := 0

    for n < len(data) {
        w, err := l.Next.Write(data[n:])
        n += w

        // Is it a non-blocking socket?
        if pathErr, ok := err.(*os.PathError); ok && pathErr.Timeout() {
            runtime.Gosched()
            continue
        }

        // Some other error?
        if err != nil {
            l.mu.Lock()
            atomic.AddInt32(&l.atomicErrors, 1)
            l.mu.Unlock()
            return n, err
        }
    }

    // Do we need to end with a '\n'?
    if len(data) == 0 || data[len(data)-1] != '\n' {
        l.Write([]byte{'\n'})
    }

    // Dirty read in case there were errors (rare).
    if atomic.LoadInt32(&l.atomicErrors) > 0 {
        l.mu.Lock()
        defer l.mu.Unlock()

        // Recheck condition under lock.
        if e := atomic.LoadInt32(&l.atomicErrors); e > 0 {
            msg := fmt.Sprintf("\n*** Dropped %d log messages ***\n", e)
            if _, err := l.Next.Write([]byte(msg)); err == nil {
                atomic.StoreInt32(&l.atomicErrors, 0)
            }
        }
    }

    return n, nil
}

// Emit emits the message.
func (l *Writer) Emit(_ int, _ Level, _ time.Time, format string, args ...any) {
    fmt.Fprintf(l, format, args...)
}

// MultiEmitter is an emitter that emits to multiple Emitters.
type MultiEmitter []Emitter

// Emit emits to all emitters.
func (m *MultiEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
    for _, e := range *m {
        e.Emit(1+depth, level, timestamp, format, v...)
    }
}

// TestLogger is implemented by testing.T and testing.B.
type TestLogger interface {
    Logf(format string, v ...any)
}

// TestEmitter may be used for wrapping tests.
type TestEmitter struct {
    TestLogger
}

// Emit emits to the TestLogger.
func (t *TestEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) {
    t.Logf(format, v...)
}

// Logger is a high-level logging interface. It is, in fact, not used within
// the log package. Rather, it is provided for others to supply contextual
// loggers that may append additional information to log statements.
// BasicLogger satisfies this interface, and may be passed around as a Logger.
type Logger interface {
    // Debugf logs a debug statement.
    Debugf(format string, v ...any)

    // Infof logs at an info level.
    Infof(format string, v ...any)

    // Warningf logs at a warning level.
    Warningf(format string, v ...any)

    // IsLogging returns true iff this level is being logged. This may be
    // used to short-circuit expensive operations for debugging calls.
    IsLogging(level Level) bool
}

// BasicLogger is the default implementation of Logger.
type BasicLogger struct {
    Level
    Emitter
}

// Debugf implements logger.Debugf.
func (l *BasicLogger) Debugf(format string, v ...any) {
    l.DebugfAtDepth(1, format, v...)
}

// Infof implements logger.Infof.
func (l *BasicLogger) Infof(format string, v ...any) {
    l.InfofAtDepth(1, format, v...)
}

// Warningf implements logger.Warningf.
func (l *BasicLogger) Warningf(format string, v ...any) {
    l.WarningfAtDepth(1, format, v...)
}

// DebugfAtDepth logs at a specific depth.
func (l *BasicLogger) DebugfAtDepth(depth int, format string, v ...any) {
    if l.IsLogging(Debug) {
        l.Emit(1+depth, Debug, time.Now(), format, v...)
    }
}

// InfofAtDepth logs at a specific depth.
func (l *BasicLogger) InfofAtDepth(depth int, format string, v ...any) {
    if l.IsLogging(Info) {
        l.Emit(1+depth, Info, time.Now(), format, v...)
    }
}

// WarningfAtDepth logs at a specific depth.
func (l *BasicLogger) WarningfAtDepth(depth int, format string, v ...any) {
    if l.IsLogging(Warning) {
        l.Emit(1+depth, Warning, time.Now(), format, v...)
    }
}

// IsLogging implements logger.IsLogging.
func (l *BasicLogger) IsLogging(level Level) bool {
    return atomic.LoadUint32((*uint32)(&l.Level)) >= uint32(level)
}

// SetLevel sets the logging level.
func (l *BasicLogger) SetLevel(level Level) {
    atomic.StoreUint32((*uint32)(&l.Level), uint32(level))
}

// logMu protects log below. We use atomic operations to read the value, but
// updates require logMu to ensure consistency.
var logMu sync.Mutex

// log is the default logger.
var log atomic.Pointer[BasicLogger]

// Log retrieves the global logger.
func Log() *BasicLogger {
    return log.Load()
}

// SetTarget sets the log target.
//
// This is not thread safe and shouldn't be called concurrently with any
// logging calls.
//
// SetTarget should be called before any instances of log.Log() to avoid race
// conditions.
func SetTarget(target Emitter) {
    logMu.Lock()
    defer logMu.Unlock()
    oldLog := Log()
    log.Store(&BasicLogger{Level: oldLog.Level, Emitter: target})
}

// SetLevel sets the log level.
func SetLevel(newLevel Level) {
    Log().SetLevel(newLevel)
}

// Debugf logs to the global logger.
func Debugf(format string, v ...any) {
    Log().DebugfAtDepth(1, format, v...)
}

// Infof logs to the global logger.
func Infof(format string, v ...any) {
    Log().InfofAtDepth(1, format, v...)
}

// Warningf logs to the global logger.
func Warningf(format string, v ...any) {
    Log().WarningfAtDepth(1, format, v...)
}

// DebugfAtDepth logs to the global logger.
func DebugfAtDepth(depth int, format string, v ...any) {
    Log().DebugfAtDepth(1+depth, format, v...)
}

// InfofAtDepth logs to the global logger.
func InfofAtDepth(depth int, format string, v ...any) {
    Log().InfofAtDepth(1+depth, format, v...)
}

// WarningfAtDepth logs to the global logger.
func WarningfAtDepth(depth int, format string, v ...any) {
    Log().WarningfAtDepth(1+depth, format, v...)
}

// defaultStackSize is the default buffer size to allocate for stack traces.
const defaultStackSize = 1 << 16 // 64KB

// maxStackSize is the maximum buffer size to allocate for stack traces.
const maxStackSize = 1 << 26 // 64MB

// Stacks returns goroutine stacks, like panic.
func Stacks(all bool) []byte {
    var trace []byte
    for s := defaultStackSize; s <= maxStackSize; s *= 4 {
        trace = make([]byte, s)
        nbytes := runtime.Stack(trace, all)
        if nbytes == s {
            continue
        }
        return trace[:nbytes]
    }
    trace = append(trace, []byte("\n\n...<too large, truncated>")...)
    return trace
}

// stackRegexp matches one level within a stack trace.
var stackRegexp = regexp.MustCompile("(?m)^\\S+\\(.*\\)$\\r?\\n^\\t\\S+:\\d+.*$\\r?\\n")

// LocalStack returns the local goroutine stack, excluding the top N entries.
// LocalStack's own entry is excluded by default and does not need to be
// counted in excludeTopN.
func LocalStack(excludeTopN int) []byte {
    replaceNext := excludeTopN + 1
    return stackRegexp.ReplaceAllFunc(Stacks(false), func(s []byte) []byte {
        if replaceNext > 0 {
            replaceNext--
            return nil
        }
        return s
    })
}

// Traceback logs the given message and dumps a stacktrace of the current
// goroutine.
//
// This will print a traceback, tb, as Warningf(format+":\n%s", v..., tb).
func Traceback(format string, v ...any) {
    v = append(v, Stacks(false))
    Warningf(format+":\n%s", v...)
}

// TracebackAll logs the given message and dumps a stacktrace of all goroutines.
//
// This will print a traceback, tb, as Warningf(format+":\n%s", v..., tb).
func TracebackAll(format string, v ...any) {
    v = append(v, Stacks(true))
    Warningf(format+":\n%s", v...)
}

// IsLogging returns whether the global logger is logging.
func IsLogging(level Level) bool {
    return Log().IsLogging(level)
}

// CopyStandardLogTo redirects the stdlib log package global output to the
// global logger for the specified level.
func CopyStandardLogTo(l Level) error {
    var f func(string, ...any)

    switch l {
    case Debug:
        f = Debugf
    case Info:
        f = Infof
    case Warning:
        f = Warningf
    default:
        return fmt.Errorf("unknown log level %v", l)
    }

    stdlog.SetOutput(linewriter.NewWriter(func(p []byte) {
        // We must not retain p, but log formatting is not required to
        // be synchronous (though the in-package implementations are),
        // so we must make a copy.
        b := make([]byte, len(p))
        copy(b, p)

        f("%s", b)
    }))

    return nil
}

func init() {
    // Store the initial value for the log.
    log.Store(&BasicLogger{Level: Info, Emitter: GoogleEmitter{&Writer{Next: os.Stderr}}})
}
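A configuration sketch against the API above (illustrative, not part of this diff): route the global logger through the JSON emitter defined earlier, raise the level, and guard a debug statement as the package comment recommends:

package main

import (
    "os"

    "gvisor.dev/gvisor/pkg/log"
)

func main() {
    // Swap the target before any other logging to avoid races.
    log.SetTarget(log.JSONEmitter{Writer: &log.Writer{Next: os.Stderr}})
    log.SetLevel(log.Debug)

    if log.IsLogging(log.Debug) {
        log.Debugf("starting with pid %d", os.Getpid())
    }
}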
63
vendor/gvisor.dev/gvisor/pkg/log/rate_limited.go
vendored
Normal file
@@ -0,0 +1,63 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package log

import (
    "time"

    "golang.org/x/time/rate"
)

type rateLimitedLogger struct {
    logger Logger
    limit  *rate.Limiter
}

func (rl *rateLimitedLogger) Debugf(format string, v ...any) {
    if rl.limit.Allow() {
        rl.logger.Debugf(format, v...)
    }
}

func (rl *rateLimitedLogger) Infof(format string, v ...any) {
    if rl.limit.Allow() {
        rl.logger.Infof(format, v...)
    }
}

func (rl *rateLimitedLogger) Warningf(format string, v ...any) {
    if rl.limit.Allow() {
        rl.logger.Warningf(format, v...)
    }
}

func (rl *rateLimitedLogger) IsLogging(level Level) bool {
    return rl.logger.IsLogging(level)
}

// BasicRateLimitedLogger returns a Logger that logs to the global logger no
// more than once per the provided duration.
func BasicRateLimitedLogger(every time.Duration) Logger {
    return RateLimitedLogger(Log(), every)
}

// RateLimitedLogger returns a Logger that logs to the provided logger no more
// than once per the provided duration.
func RateLimitedLogger(logger Logger, every time.Duration) Logger {
    return &rateLimitedLogger{
        logger: logger,
        limit:  rate.NewLimiter(rate.Every(every), 1),
    }
}
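Usage sketch (illustrative): wrapping a hot path's logger so it emits at most one message per interval, with everything else silently dropped:

package main

import (
    "time"

    "gvisor.dev/gvisor/pkg/log"
)

func main() {
    // At most one message every 5 seconds survives, however hot the loop.
    rl := log.BasicRateLimitedLogger(5 * time.Second)
    for i := 0; i < 1000; i++ {
        rl.Warningf("retrying operation, attempt %d", i)
    }
}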
28
vendor/gvisor.dev/gvisor/pkg/rand/rand.go
vendored
Normal file
@@ -0,0 +1,28 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !linux
// +build !linux

package rand

import "crypto/rand"

// Reader is the default reader.
var Reader = rand.Reader

// Read implements io.Reader.Read.
func Read(b []byte) (int, error) {
    return rand.Read(b)
}
82
vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go
vendored
Normal file
@@ -0,0 +1,82 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rand

import (
    "bufio"
    "crypto/rand"
    "io"

    "golang.org/x/sys/unix"
    "gvisor.dev/gvisor/pkg/sync"
)

// reader implements an io.Reader that returns pseudorandom bytes.
type reader struct {
    once         sync.Once
    useGetrandom bool
}

// Read implements io.Reader.Read.
func (r *reader) Read(p []byte) (int, error) {
    r.once.Do(func() {
        _, err := unix.Getrandom(p, 0)
        if err != unix.ENOSYS {
            r.useGetrandom = true
        }
    })

    if r.useGetrandom {
        return unix.Getrandom(p, 0)
    }
    return rand.Read(p)
}

// bufferedReader implements a threadsafe buffered io.Reader.
type bufferedReader struct {
    mu sync.Mutex
    r  *bufio.Reader
}

// Read implements io.Reader.Read.
func (b *bufferedReader) Read(p []byte) (int, error) {
    // In Linux, reads of up to page size bytes will always complete fully.
    // See drivers/char/random.c:get_random_bytes_user().
    // NOTE(gvisor.dev/issue/9445): Some applications rely on this behavior.
    const pageSize = 4096
    min := len(p)
    if min > pageSize {
        min = pageSize
    }
    b.mu.Lock()
    defer b.mu.Unlock()
    return io.ReadAtLeast(b.r, p, min)
}

// Reader is the default reader.
var Reader io.Reader = &bufferedReader{r: bufio.NewReader(&reader{})}

// Read reads from the default reader.
func Read(b []byte) (int, error) {
    return io.ReadFull(Reader, b)
}

// Init can be called to make sure /dev/urandom is pre-opened on kernels that
// do not support getrandom(2).
func Init() error {
    p := make([]byte, 1)
    _, err := Read(p)
    return err
}
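Caller-side sketch (illustrative): Read fills the whole buffer or fails, and Init forces the fallback path open up front, which matters if syscall filters are installed later:

package main

import (
    "fmt"

    "gvisor.dev/gvisor/pkg/rand"
)

func main() {
    // Pre-open the /dev/urandom fallback before, e.g., installing seccomp filters.
    if err := rand.Init(); err != nil {
        panic(err)
    }
    key := make([]byte, 32)
    if _, err := rand.Read(key); err != nil { // fills all 32 bytes or errors
        panic(err)
    }
    fmt.Printf("%x\n", key)
}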
3
vendor/gvisor.dev/gvisor/pkg/rand/rand_linux_state_autogen.go
vendored
Normal file
@@ -0,0 +1,3 @@
// automatically generated by stateify.

package rand
6
vendor/gvisor.dev/gvisor/pkg/rand/rand_state_autogen.go
vendored
Normal file
@@ -0,0 +1,6 @@
// automatically generated by stateify.

//go:build !linux
// +build !linux

package rand
131
vendor/gvisor.dev/gvisor/pkg/rand/rng.go
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2023 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
// limitations under the License.

// Package rand implements a cryptographically secure pseudorandom number
// generator.
package rand

import (
	"encoding/binary"
	"fmt"
	"io"
)

// RNG exposes convenience functions based on a cryptographically secure
// io.Reader.
type RNG struct {
	Reader io.Reader
}

// RNGFrom returns a new RNG. r must be a cryptographically secure io.Reader.
func RNGFrom(r io.Reader) RNG {
	return RNG{Reader: r}
}

// Uint16 is analogous to the standard library's math/rand.Uint16.
func (rg *RNG) Uint16() uint16 {
	var data [2]byte
	if _, err := rg.Reader.Read(data[:]); err != nil {
		panic(fmt.Sprintf("Read() failed: %v", err))
	}
	return binary.NativeEndian.Uint16(data[:])
}

// Uint32 is analogous to the standard library's math/rand.Uint32.
func (rg *RNG) Uint32() uint32 {
	var data [4]byte
	if _, err := rg.Reader.Read(data[:]); err != nil {
		panic(fmt.Sprintf("Read() failed: %v", err))
	}
	return binary.NativeEndian.Uint32(data[:])
}

// Int63n is analogous to the standard library's math/rand.Int63n.
func (rg *RNG) Int63n(n int64) int64 {
	// Based on Go's rand package implementation, but using
	// cryptographically secure random numbers.
	if n <= 0 {
		panic(fmt.Sprintf("n must be positive, but got %d", n))
	}

	// This can be done quickly when n is a power of 2.
	if n&(n-1) == 0 {
		return int64(rg.Uint64()) & (n - 1)
	}

	// The naive approach would be to return rg.Int63()%n, but we need the
	// random number to be fair. It shouldn't be biased towards certain
	// results, but simple modular math can be very biased. For example, if
	// n is 40% of the maximum int64, then the output values of rg.Int63
	// map to return values as follows:
	//
	// - The first 40% of values map to themselves.
	// - The second 40% map to themselves - maximum int64.
	// - The remaining 20% map to themselves - 2 * (maximum int64),
	//   i.e. the first half of possible output values.
	//
	// And thus 60% of results map to the first half of possible output
	// values, and 40% map to the second half. Oops!
	//
	// We use the same trick as Go to deal with this: shave off the last
	// segment (the 20% in our example) to make the RNG fair.
	//
	// In the worst case, n is just over half of maximum int64, meaning
	// that the upper half of rg.Int63 return values are bad. So each call
	// to rg.Int63 has, at worst, a 50% chance of needing a retry.
	maximum := int64((1 << 63) - 1 - (1<<63)%uint64(n))
	ret := rg.Int63()
	for ret > maximum {
		ret = rg.Int63()
	}
	return ret % n
}

// Int63 is analogous to the standard library's math/rand.Int63.
func (rg *RNG) Int63() int64 {
	return ((1 << 63) - 1) & int64(rg.Uint64())
}

// Uint64 is analogous to the standard library's math/rand.Uint64.
func (rg *RNG) Uint64() uint64 {
	var data [8]byte
	if _, err := rg.Reader.Read(data[:]); err != nil {
		panic(fmt.Sprintf("Read() failed: %v", err))
	}
	return binary.NativeEndian.Uint64(data[:])
}

// Uint32 is analogous to the standard library's math/rand.Uint32.
func Uint32() uint32 {
	rng := RNG{Reader: Reader}
	return rng.Uint32()
}

// Int63n is analogous to the standard library's math/rand.Int63n.
func Int63n(n int64) int64 {
	rng := RNG{Reader: Reader}
	return rng.Int63n(n)
}

// Int63 is analogous to the standard library's math/rand.Int63.
func Int63() int64 {
	rng := RNG{Reader: Reader}
	return rng.Int63()
}

// Uint64 is analogous to the standard library's math/rand.Uint64.
func Uint64() uint64 {
	rng := RNG{Reader: Reader}
	return rng.Uint64()
}
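As a quick illustration of the API above, the following sketch draws a fair die roll using the package-level helpers. It assumes the package-level Reader variable referenced by these functions (defined elsewhere in this package) is backed by a cryptographically secure source; the example itself is not part of the vendored code.

	package main

	import (
		"fmt"

		"gvisor.dev/gvisor/pkg/rand"
	)

	func main() {
		// Int63n applies the rejection-sampling trick described in the
		// comments above, so each face of the die is equally likely.
		roll := rand.Int63n(6) + 1
		fmt.Println("rolled", roll)
	}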
196
vendor/gvisor.dev/gvisor/pkg/refs/refcounter.go
vendored
Normal file
@@ -0,0 +1,196 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package refs defines an interface for reference counted objects.
package refs

import (
	"bytes"
	"fmt"
	"runtime"

	"gvisor.dev/gvisor/pkg/atomicbitops"
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/sync"
)

// RefCounter is the interface to be implemented by objects that are reference
// counted.
type RefCounter interface {
	// IncRef increments the reference counter on the object.
	IncRef()

	// DecRef decrements the object's reference count. Users of refs_template.Refs
	// may specify a destructor to be called once the reference count reaches zero.
	DecRef(ctx context.Context)
}

// TryRefCounter is like RefCounter but allows the ref increment to be tried.
type TryRefCounter interface {
	RefCounter

	// TryIncRef attempts to increment the reference count, but may fail if all
	// references have already been dropped, in which case it returns false. If
	// true is returned, then a valid reference is now held on the object.
	TryIncRef() bool
}

// LeakMode configures the leak checker.
type LeakMode uint32

const (
	// NoLeakChecking indicates that no effort should be made to check for
	// leaks.
	NoLeakChecking LeakMode = iota

	// LeaksLogWarning indicates that a warning should be logged when leaks
	// are found.
	LeaksLogWarning

	// LeaksPanic indicates that a panic should be issued when leaks are found.
	LeaksPanic
)

// Set implements flag.Value.
func (l *LeakMode) Set(v string) error {
	switch v {
	case "disabled":
		*l = NoLeakChecking
	case "log-names":
		*l = LeaksLogWarning
	case "panic":
		*l = LeaksPanic
	default:
		return fmt.Errorf("invalid ref leak mode %q", v)
	}
	return nil
}

// Get implements flag.Value.
func (l *LeakMode) Get() any {
	return *l
}

// String implements flag.Value.
func (l LeakMode) String() string {
	switch l {
	case NoLeakChecking:
		return "disabled"
	case LeaksLogWarning:
		return "log-names"
	case LeaksPanic:
		return "panic"
	default:
		panic(fmt.Sprintf("invalid ref leak mode %d", l))
	}
}

// leakMode stores the current mode for the reference leak checker.
//
// Values must be one of the LeakMode values.
//
// leakMode must be accessed atomically.
var leakMode atomicbitops.Uint32

// SetLeakMode configures the reference leak checker.
func SetLeakMode(mode LeakMode) {
	leakMode.Store(uint32(mode))
}

// GetLeakMode returns the current leak mode.
func GetLeakMode() LeakMode {
	return LeakMode(leakMode.Load())
}

const maxStackFrames = 40

type fileLine struct {
	file string
	line int
}

// A stackKey is a representation of a stack frame for use as a map key.
//
// The fileLine type is used as PC values seem to vary across collections, even
// for the same call stack.
type stackKey [maxStackFrames]fileLine

var stackCache = struct {
	sync.Mutex
	entries map[stackKey][]uintptr
}{entries: map[stackKey][]uintptr{}}

func makeStackKey(pcs []uintptr) stackKey {
	frames := runtime.CallersFrames(pcs)
	var key stackKey
	keySlice := key[:0]
	for {
		frame, more := frames.Next()
		keySlice = append(keySlice, fileLine{frame.File, frame.Line})

		if !more || len(keySlice) == len(key) {
			break
		}
	}
	return key
}

// RecordStack constructs and returns the PCs on the current stack.
func RecordStack() []uintptr {
	pcs := make([]uintptr, maxStackFrames)
	n := runtime.Callers(1, pcs)
	if n == 0 {
		// No pcs available. Stop now.
		//
		// This can happen if the first argument to runtime.Callers
		// is large.
		return nil
	}
	pcs = pcs[:n]
	key := makeStackKey(pcs)
	stackCache.Lock()
	v, ok := stackCache.entries[key]
	if !ok {
		// Reallocate to prevent pcs from escaping.
		v = append([]uintptr(nil), pcs...)
		stackCache.entries[key] = v
	}
	stackCache.Unlock()
	return v
}

// FormatStack converts the given stack into a readable format.
func FormatStack(pcs []uintptr) string {
	frames := runtime.CallersFrames(pcs)
	var trace bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&trace, "%s:%d: %s\n", frame.File, frame.Line, frame.Function)

		if !more {
			break
		}
	}
	return trace.String()
}

// OnExit is called on sandbox exit. It runs GC to enqueue refcount finalizers,
// which check for reference leaks. There is no way to guarantee that every
// finalizer will run before exiting, but this at least ensures that they will
// be discovered/enqueued by GC.
func OnExit() {
	if LeakMode(leakMode.Load()) != NoLeakChecking {
		runtime.GC()
	}
}
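Because LeakMode implements flag.Value (Set/String above), it can be wired directly into a flag set. A minimal sketch; the flag name here is hypothetical, not one the package defines:

	package main

	import (
		"flag"
		"fmt"

		"gvisor.dev/gvisor/pkg/refs"
	)

	func main() {
		var mode refs.LeakMode
		// Accepts "disabled", "log-names", or "panic", per Set above.
		flag.Var(&mode, "ref-leak-mode", "reference leak checker mode")
		flag.Parse()
		refs.SetLeakMode(mode)
		fmt.Println("leak mode:", refs.GetLeakMode())
	}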
179
vendor/gvisor.dev/gvisor/pkg/refs/refs_map.go
vendored
Normal file
@@ -0,0 +1,179 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package refs

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/sync"
)

var (
	// liveObjects is a global map of reference-counted objects. Objects are
	// inserted when leak check is enabled, and they are removed when they are
	// destroyed. It is protected by liveObjectsMu.
	liveObjects   map[CheckedObject]struct{}
	liveObjectsMu sync.Mutex
)

// CheckedObject represents a reference-counted object with an informative
// leak detection message.
type CheckedObject interface {
	// RefType is the type of the reference-counted object.
	RefType() string

	// LeakMessage supplies a warning to be printed upon leak detection.
	LeakMessage() string

	// LogRefs indicates whether reference-related events should be logged.
	LogRefs() bool
}

func init() {
	liveObjects = make(map[CheckedObject]struct{})
}

// LeakCheckEnabled returns whether leak checking is enabled. The following
// functions should only be called if it returns true.
func LeakCheckEnabled() bool {
	mode := GetLeakMode()
	return mode != NoLeakChecking
}

// leakCheckPanicEnabled returns whether DoLeakCheck() should panic when leaks
// are detected.
func leakCheckPanicEnabled() bool {
	return GetLeakMode() == LeaksPanic
}

// Register adds obj to the live object map.
func Register(obj CheckedObject) {
	if LeakCheckEnabled() {
		liveObjectsMu.Lock()
		if _, ok := liveObjects[obj]; ok {
			panic(fmt.Sprintf("Unexpected entry in leak checking map: reference %p already added", obj))
		}
		liveObjects[obj] = struct{}{}
		liveObjectsMu.Unlock()
		if LeakCheckEnabled() && obj.LogRefs() {
			logEvent(obj, "registered")
		}
	}
}

// Unregister removes obj from the live object map.
func Unregister(obj CheckedObject) {
	if LeakCheckEnabled() {
		liveObjectsMu.Lock()
		defer liveObjectsMu.Unlock()
		if _, ok := liveObjects[obj]; !ok {
			panic(fmt.Sprintf("Expected to find entry in leak checking map for reference %p", obj))
		}
		delete(liveObjects, obj)
		if LeakCheckEnabled() && obj.LogRefs() {
			logEvent(obj, "unregistered")
		}
	}
}

// LogIncRef logs a reference increment.
func LogIncRef(obj CheckedObject, refs int64) {
	if LeakCheckEnabled() && obj.LogRefs() {
		logEvent(obj, fmt.Sprintf("IncRef to %d", refs))
	}
}

// LogTryIncRef logs a successful TryIncRef call.
func LogTryIncRef(obj CheckedObject, refs int64) {
	if LeakCheckEnabled() && obj.LogRefs() {
		logEvent(obj, fmt.Sprintf("TryIncRef to %d", refs))
	}
}

// LogDecRef logs a reference decrement.
func LogDecRef(obj CheckedObject, refs int64) {
	if LeakCheckEnabled() && obj.LogRefs() {
		logEvent(obj, fmt.Sprintf("DecRef to %d", refs))
	}
}

// logEvent logs a message for the given reference-counted object.
//
// obj.LogRefs() should be checked before calling logEvent, in order to avoid
// calling any text processing needed to evaluate msg.
func logEvent(obj CheckedObject, msg string) {
	log.Infof("[%s %p] %s:\n%s", obj.RefType(), obj, msg, FormatStack(RecordStack()))
}

// checkOnce makes sure that leak checking is only done once. DoLeakCheck is
// called from multiple places (which may overlap) to cover different sandbox
// exit scenarios.
var checkOnce sync.Once

// DoLeakCheck iterates through the live object map and logs a message for each
// object. It should be called when no reference-counted objects are reachable
// anymore, at which point anything left in the map is considered a leak. On
// multiple calls, only the first call will perform the leak check.
func DoLeakCheck() {
	if LeakCheckEnabled() {
		checkOnce.Do(doLeakCheck)
	}
}

// DoRepeatedLeakCheck is the same as DoLeakCheck except that it can be called
// multiple times by the caller to incrementally perform leak checking.
func DoRepeatedLeakCheck() {
	if LeakCheckEnabled() {
		doLeakCheck()
	}
}

type leakCheckDisabled interface {
	LeakCheckDisabled() bool
}

// CleanupSync is used to wait for async cleanup actions.
var CleanupSync sync.WaitGroup

func doLeakCheck() {
	CleanupSync.Wait()
	liveObjectsMu.Lock()
	defer liveObjectsMu.Unlock()
	leaked := len(liveObjects)
	if leaked > 0 {
		n := 0
		msg := fmt.Sprintf("Leak checking detected %d leaked objects:\n", leaked)
		for obj := range liveObjects {
			skip := false
			if o, ok := obj.(leakCheckDisabled); ok {
				skip = o.LeakCheckDisabled()
			}
			if skip {
				log.Debugf(obj.LeakMessage())
				continue
			}
			msg += obj.LeakMessage() + "\n"
			n++
		}
		if n == 0 {
			return
		}
		if leakCheckPanicEnabled() {
			panic(msg)
		}
		log.Warningf(msg)
	}
}
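To make the CheckedObject contract concrete, here is a minimal hypothetical implementation a reference-counted type could provide. The type name and messages are illustrative only, not part of the package:

	package myrefs

	import "fmt"

	// file is a hypothetical reference-counted object.
	type file struct {
		name string
	}

	// RefType identifies the object's type in leak reports.
	func (f *file) RefType() string { return "file" }

	// LeakMessage is the warning printed if f is still registered when
	// the leak check runs.
	func (f *file) LeakMessage() string {
		return fmt.Sprintf("file %q leaked", f.name)
	}

	// LogRefs gates per-reference event logging (see LogIncRef above).
	func (f *file) LogRefs() bool { return false }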
3
vendor/gvisor.dev/gvisor/pkg/refs/refs_state_autogen.go
vendored
Normal file
@@ -0,0 +1,3 @@
// automatically generated by stateify.

package refs
477
vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go
vendored
Normal file
@@ -0,0 +1,477 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package sleep allows goroutines to efficiently sleep on multiple sources of
// notifications (wakers). It offers O(1) complexity, which is different from
// multi-channel selects which have O(n) complexity (where n is the number of
// channels) and a considerable constant factor.
//
// It is similar to edge-triggered epoll waits, where the user registers each
// object of interest once, and then can repeatedly wait on all of them.
//
// A Waker object is used to wake a sleeping goroutine (G) up, or prevent it
// from going to sleep next. A Sleeper object is used to receive notifications
// from wakers, and if no notifications are available, to optionally sleep until
// one becomes available.
//
// A Waker can be associated with at most one Sleeper, but a Sleeper can be
// associated with multiple Wakers. A Sleeper has a list of asserted (ready)
// wakers; when Fetch() is called repeatedly, elements from this list are
// returned until the list becomes empty, in which case the goroutine goes to
// sleep. When Assert() is called on a Waker, it adds itself to the Sleeper's
// asserted list and wakes the G up from its sleep if needed.
//
// Sleeper objects are expected to be used as follows, with just one goroutine
// executing this code:
//
// // One time set-up.
// s := sleep.Sleeper{}
// s.AddWaker(&w1)
// s.AddWaker(&w2)
//
// // Called repeatedly.
// for {
// 	switch s.Fetch(true) {
// 	case &w1:
// 		// Do work triggered by w1 being asserted.
// 	case &w2:
// 		// Do work triggered by w2 being asserted.
// 	}
// }
//
// And Waker objects are expected to call w.Assert() when they want the sleeper
// to wake up and perform work.
//
// The notifications are edge-triggered, which means that if a Waker calls
// Assert() several times before the sleeper has the chance to wake up, it will
// only be notified once and should perform all pending work (alternatively, it
// can also call Assert() on the waker, to ensure that it will wake up again).
//
// The "unsafeness" here is in the casts to/from unsafe.Pointer, which is safe
// when only one type is used for each unsafe.Pointer (which is the case here);
// we should just make sure that this remains the case in the future. The usage
// of the unsafe package could be confined to sharedWaker and sharedSleeper types
// that would hold pointers in atomic.Pointers, but the Go compiler currently
// can't optimize these as well (it won't inline their method calls), which
// reduces performance.
package sleep

import (
	"context"
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/sync"
)

const (
	// preparingG is stored in sleepers to indicate that they're preparing
	// to sleep.
	preparingG = 1
)

var (
	// assertedSleeper is a sentinel sleeper. A pointer to it is stored in
	// wakers that are asserted.
	assertedSleeper Sleeper
)

// Sleeper allows a goroutine to sleep and receive wake up notifications from
// Wakers in an efficient way.
//
// This is similar to edge-triggered epoll in that wakers are added to the
// sleeper once and the sleeper can then repeatedly sleep in O(1) time while
// waiting on all wakers.
//
// None of the methods in a Sleeper can be called concurrently. Wakers that have
// been added to a sleeper A can only be added to another sleeper after A.Done()
// returns. These restrictions allow this to be implemented lock-free.
//
// This struct is thread-compatible.
//
// +stateify savable
type Sleeper struct {
	_ sync.NoCopy

	// sharedList is a "stack" of asserted wakers. They atomically add
	// themselves to the front of this list as they become asserted.
	sharedList unsafe.Pointer `state:".(*Waker)"`

	// localList is a list of asserted wakers that is only accessible to the
	// waiter, and thus doesn't have to be accessed atomically. When
	// fetching more wakers, the waiter will first go through this list, and
	// only when it's empty will it atomically fetch wakers from
	// sharedList.
	localList *Waker

	// allWakers is a list with all wakers that have been added to this
	// sleeper. It is used during cleanup to remove associations.
	allWakers *Waker

	// waitingG holds the G that is sleeping, if any. It is used by wakers
	// to determine which G, if any, they should wake.
	waitingG uintptr `state:"zero"`
}

// saveSharedList is invoked by stateify.
func (s *Sleeper) saveSharedList() *Waker {
	return (*Waker)(atomic.LoadPointer(&s.sharedList))
}

// loadSharedList is invoked by stateify.
func (s *Sleeper) loadSharedList(_ context.Context, w *Waker) {
	atomic.StorePointer(&s.sharedList, unsafe.Pointer(w))
}

// AddWaker associates the given waker to the sleeper.
func (s *Sleeper) AddWaker(w *Waker) {
	if w.allWakersNext != nil {
		panic("waker has non-nil allWakersNext; owned by another sleeper?")
	}
	if w.next != nil {
		panic("waker has non-nil next; queued in another sleeper?")
	}

	// Add the waker to the list of all wakers.
	w.allWakersNext = s.allWakers
	s.allWakers = w

	// Try to associate the waker with the sleeper. If it's already
	// asserted, we simply enqueue it in the "ready" list.
	for {
		p := (*Sleeper)(atomic.LoadPointer(&w.s))
		if p == &assertedSleeper {
			s.enqueueAssertedWaker(w, true /* wakep */)
			return
		}

		if atomic.CompareAndSwapPointer(&w.s, usleeper(p), usleeper(s)) {
			return
		}
	}
}

// nextWaker returns the next waker in the notification list, blocking if
// needed. The parameter wakepOrSleep indicates that if the operation does not
// block, then we will need to explicitly wake a runtime P.
//
// Precondition: wakepOrSleep may be true iff block is true.
//
//go:nosplit
func (s *Sleeper) nextWaker(block, wakepOrSleep bool) *Waker {
	// Attempt to replenish the local list if it's currently empty.
	if s.localList == nil {
		for atomic.LoadPointer(&s.sharedList) == nil {
			// Fail request if caller requested that we
			// don't block.
			if !block {
				return nil
			}

			// Indicate to wakers that we're about to sleep;
			// this allows them to abort the wait by setting
			// waitingG back to zero (which we'll notice
			// before committing the sleep).
			atomic.StoreUintptr(&s.waitingG, preparingG)

			// Check if something was queued while we were
			// preparing to sleep. We need this interleaving
			// to avoid missing wake ups.
			if atomic.LoadPointer(&s.sharedList) != nil {
				atomic.StoreUintptr(&s.waitingG, 0)
				break
			}

			// Since we are sleeping for sure, we no longer
			// need to wakep once we get a value.
			wakepOrSleep = false

			// Try to commit the sleep and report it to the
			// tracer as a select.
			//
			// gopark puts the caller to sleep and calls
			// commitSleep to decide whether to immediately
			// wake the caller up or to leave it sleeping.
			const traceEvGoBlockSelect = 24
			// See runtime2.go in the Go runtime package for
			// the values to pass as the waitReason here.
			const waitReasonSelect = 9
			sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceBlockSelect, 0)
		}

		// Pull the shared list out and reverse it in the local
		// list. Given that wakers push themselves in reverse
		// order, we fix things here.
		v := (*Waker)(atomic.SwapPointer(&s.sharedList, nil))
		for v != nil {
			cur := v
			v = v.next

			cur.next = s.localList
			s.localList = cur
		}
	}

	// Remove the waker in the front of the list.
	w := s.localList
	s.localList = w.next

	// Do we need to wake a P?
	if wakepOrSleep {
		sync.Wakep()
	}

	return w
}

// commitSleep signals to wakers that the given g is now sleeping. Wakers can
// then fetch it and wake it.
//
// The commit may fail if wakers have been asserted after our last check, in
// which case they will have set s.waitingG to zero.
//
//go:norace
//go:nosplit
func commitSleep(g uintptr, waitingG unsafe.Pointer) bool {
	return sync.RaceUncheckedAtomicCompareAndSwapUintptr((*uintptr)(waitingG), preparingG, g)
}

// fetch is the backing implementation for Fetch and AssertAndFetch.
//
// Preconditions are the same as nextWaker.
//
//go:nosplit
func (s *Sleeper) fetch(block, wakepOrSleep bool) *Waker {
	for {
		w := s.nextWaker(block, wakepOrSleep)
		if w == nil {
			return nil
		}

		// Reassociate the waker with the sleeper. If the waker was
		// still asserted we can return it, otherwise try the next one.
		old := (*Sleeper)(atomic.SwapPointer(&w.s, usleeper(s)))
		if old == &assertedSleeper {
			return w
		}
	}
}

// Fetch fetches the next wake-up notification. If a notification is
// immediately available, the asserted waker is returned immediately.
// Otherwise, the behavior depends on the value of 'block': if true, the
// current goroutine blocks until a notification arrives and returns the
// asserted waker; if false, nil will be returned.
//
// N.B. This method is *not* thread-safe. Only one goroutine at a time is
// allowed to call this method.
func (s *Sleeper) Fetch(block bool) *Waker {
	return s.fetch(block, false /* wakepOrSleep */)
}

// AssertAndFetch asserts the given waker and fetches the next wake-up notification.
// Note that this will always be blocking, since there is no value in joining a
// non-blocking operation.
//
// N.B. Like Fetch, this method is *not* thread-safe. This will also yield the current
// P to the next goroutine, avoiding associated scheduling overhead.
//
// +checkescape:all
//
//go:nosplit
func (s *Sleeper) AssertAndFetch(n *Waker) *Waker {
	n.assert(false /* wakep */)
	return s.fetch(true /* block */, true /* wakepOrSleep */)
}

// Done is used to indicate that the caller won't use this Sleeper anymore. It
// removes the association with all wakers so that they can be safely reused
// by another sleeper after Done() returns.
func (s *Sleeper) Done() {
	// Remove all associations that we can, and build a list of the ones we
	// could not. An association can be removed right away from waker w if
	// w.s has a pointer to the sleeper, that is, the waker is not asserted
	// yet. By atomically switching w.s to nil, we guarantee that
	// subsequent calls to Assert() on the waker will not result in it
	// being queued.
	for w := s.allWakers; w != nil; w = s.allWakers {
		next := w.allWakersNext // Before zapping.
		if atomic.CompareAndSwapPointer(&w.s, usleeper(s), nil) {
			w.allWakersNext = nil
			w.next = nil
			s.allWakers = next // Move ahead.
			continue
		}

		// Dequeue exactly one waiter from the list, it may not be
		// this one but we know this one is in the process. We must
		// leave it in the asserted state but drop it from our lists.
		if w := s.nextWaker(true, false); w != nil {
			prev := &s.allWakers
			for *prev != w {
				prev = &((*prev).allWakersNext)
			}
			*prev = (*prev).allWakersNext
			w.allWakersNext = nil
			w.next = nil
		}
	}
}

// enqueueAssertedWaker enqueues an asserted waker to the "ready" circular list
// of wakers that want to notify the sleeper.
//
//go:nosplit
func (s *Sleeper) enqueueAssertedWaker(w *Waker, wakep bool) {
	// Add the new waker to the front of the list.
	for {
		v := (*Waker)(atomic.LoadPointer(&s.sharedList))
		w.next = v
		if atomic.CompareAndSwapPointer(&s.sharedList, uwaker(v), uwaker(w)) {
			break
		}
	}

	// Nothing to do if there isn't a G waiting.
	if atomic.LoadUintptr(&s.waitingG) == 0 {
		return
	}

	// Signal to the sleeper that a waker has been asserted.
	switch g := atomic.SwapUintptr(&s.waitingG, 0); g {
	case 0, preparingG:
	default:
		// We managed to get a G. Wake it up.
		sync.Goready(g, 0, wakep)
	}
}

// Waker represents a source of wake-up notifications to be sent to sleepers. A
// waker can be associated with at most one sleeper at a time, and at any given
// time is either in asserted or non-asserted state.
//
// Once asserted, the waker remains so until it is manually cleared or a sleeper
// consumes its assertion (i.e., a sleeper wakes up or is prevented from going
// to sleep due to the waker).
//
// This struct is thread-safe, that is, its methods can be called concurrently
// by multiple goroutines.
//
// Note, it is not safe to copy a Waker as its fields are modified by value
// (the pointer fields are individually modified with atomic operations).
//
// +stateify savable
type Waker struct {
	_ sync.NoCopy

	// s is the sleeper that this waker can wake up. Only one sleeper at a
	// time is allowed. This field can have three classes of values:
	// nil -- the waker is not asserted: it either is not associated with
	// a sleeper, or is queued to a sleeper due to being previously
	// asserted. This is the zero value.
	// &assertedSleeper -- the waker is asserted.
	// otherwise -- the waker is not asserted, and is associated with the
	// given sleeper. Once it transitions to asserted state, the
	// associated sleeper will be woken.
	s unsafe.Pointer `state:".(wakerState)"`

	// next is used to form a linked list of asserted wakers in a sleeper.
	next *Waker

	// allWakersNext is used to form a linked list of all wakers associated
	// to a given sleeper.
	allWakersNext *Waker
}

type wakerState struct {
	asserted bool
	other    *Sleeper
}

// saveS is invoked by stateify.
func (w *Waker) saveS() wakerState {
	s := (*Sleeper)(atomic.LoadPointer(&w.s))
	if s == &assertedSleeper {
		return wakerState{asserted: true}
	}
	return wakerState{other: s}
}

// loadS is invoked by stateify.
func (w *Waker) loadS(_ context.Context, ws wakerState) {
	if ws.asserted {
		atomic.StorePointer(&w.s, unsafe.Pointer(&assertedSleeper))
	} else {
		atomic.StorePointer(&w.s, unsafe.Pointer(ws.other))
	}
}

// assert is the implementation for Assert.
//
//go:nosplit
func (w *Waker) assert(wakep bool) {
	// Nothing to do if the waker is already asserted. This check allows us
	// to complete this case (already asserted) without any interlocked
	// operations on x86.
	if atomic.LoadPointer(&w.s) == usleeper(&assertedSleeper) {
		return
	}

	// Mark the waker as asserted, and wake up a sleeper if there is one.
	switch s := (*Sleeper)(atomic.SwapPointer(&w.s, usleeper(&assertedSleeper))); s {
	case nil:
	case &assertedSleeper:
	default:
		s.enqueueAssertedWaker(w, wakep)
	}
}

// Assert moves the waker to an asserted state, if it isn't asserted yet. When
// asserted, the waker will cause its matching sleeper to wake up.
func (w *Waker) Assert() {
	w.assert(true /* wakep */)
}

// Clear moves the waker to the non-asserted state and returns whether it was
// asserted before being cleared.
//
// N.B. The waker isn't removed from the "ready" list of a sleeper (if it
// happens to be in one), but the sleeper will notice that it is not asserted
// anymore and won't return it to the caller.
func (w *Waker) Clear() bool {
	// Nothing to do if the waker is not asserted. This check allows us to
	// complete this case (already not asserted) without any interlocked
	// operations on x86.
	if atomic.LoadPointer(&w.s) != usleeper(&assertedSleeper) {
		return false
	}

	// Try to store nil in the sleeper, which indicates that the waker is
	// not asserted.
	return atomic.CompareAndSwapPointer(&w.s, usleeper(&assertedSleeper), nil)
}

// IsAsserted returns whether the waker is currently asserted (i.e., if it's
// currently in a state that would cause its matching sleeper to wake up).
func (w *Waker) IsAsserted() bool {
	return (*Sleeper)(atomic.LoadPointer(&w.s)) == &assertedSleeper
}

func usleeper(s *Sleeper) unsafe.Pointer {
	return unsafe.Pointer(s)
}

func uwaker(w *Waker) unsafe.Pointer {
	return unsafe.Pointer(w)
}
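A small end-to-end sketch of the pattern the package doc describes, with one producer goroutine asserting a waker while the sleeper blocks in Fetch. The timing and names are illustrative only:

	package main

	import (
		"fmt"
		"time"

		"gvisor.dev/gvisor/pkg/sleep"
	)

	func main() {
		var (
			s sleep.Sleeper
			w sleep.Waker
		)
		s.AddWaker(&w)

		// A producer asserts the waker from another goroutine. Because
		// notifications are edge-triggered, repeated Asserts before the
		// sleeper wakes coalesce into a single wake-up.
		go func() {
			time.Sleep(10 * time.Millisecond)
			w.Assert()
		}()

		if got := s.Fetch(true /* block */); got == &w {
			fmt.Println("woken by w")
		}
		s.Done()
	}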
80
vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go
vendored
Normal file
@@ -0,0 +1,80 @@
// automatically generated by stateify.

package sleep

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (s *Sleeper) StateTypeName() string {
	return "pkg/sleep.Sleeper"
}

func (s *Sleeper) StateFields() []string {
	return []string{
		"sharedList",
		"localList",
		"allWakers",
	}
}

func (s *Sleeper) beforeSave() {}

// +checklocksignore
func (s *Sleeper) StateSave(stateSinkObject state.Sink) {
	s.beforeSave()
	var sharedListValue *Waker
	sharedListValue = s.saveSharedList()
	stateSinkObject.SaveValue(0, sharedListValue)
	stateSinkObject.Save(1, &s.localList)
	stateSinkObject.Save(2, &s.allWakers)
}

func (s *Sleeper) afterLoad(context.Context) {}

// +checklocksignore
func (s *Sleeper) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(1, &s.localList)
	stateSourceObject.Load(2, &s.allWakers)
	stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(ctx, y.(*Waker)) })
}

func (w *Waker) StateTypeName() string {
	return "pkg/sleep.Waker"
}

func (w *Waker) StateFields() []string {
	return []string{
		"s",
		"next",
		"allWakersNext",
	}
}

func (w *Waker) beforeSave() {}

// +checklocksignore
func (w *Waker) StateSave(stateSinkObject state.Sink) {
	w.beforeSave()
	var sValue wakerState
	sValue = w.saveS()
	stateSinkObject.SaveValue(0, sValue)
	stateSinkObject.Save(1, &w.next)
	stateSinkObject.Save(2, &w.allWakersNext)
}

func (w *Waker) afterLoad(context.Context) {}

// +checklocksignore
func (w *Waker) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(1, &w.next)
	stateSourceObject.Load(2, &w.allWakersNext)
	stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(ctx, y.(wakerState)) })
}

func init() {
	state.Register((*Sleeper)(nil))
	state.Register((*Waker)(nil))
}
76
vendor/gvisor.dev/gvisor/pkg/state/addr_range.go
vendored
Normal file
@@ -0,0 +1,76 @@
package state

// addrRange represents a contiguous range of uintptr addresses (this file is
// an instantiation of the generic Range template, where T is uintptr).
//
// +stateify savable
type addrRange struct {
	// Start is the inclusive start of the range.
	Start uintptr

	// End is the exclusive end of the range.
	End uintptr
}

// WellFormed returns true if r.Start <= r.End. All other methods on a Range
// require that the Range is well-formed.
//
//go:nosplit
func (r addrRange) WellFormed() bool {
	return r.Start <= r.End
}

// Length returns the length of the range.
//
//go:nosplit
func (r addrRange) Length() uintptr {
	return r.End - r.Start
}

// Contains returns true if r contains x.
//
//go:nosplit
func (r addrRange) Contains(x uintptr) bool {
	return r.Start <= x && x < r.End
}

// Overlaps returns true if r and r2 overlap.
//
//go:nosplit
func (r addrRange) Overlaps(r2 addrRange) bool {
	return r.Start < r2.End && r2.Start < r.End
}

// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
// contained within r.
//
//go:nosplit
func (r addrRange) IsSupersetOf(r2 addrRange) bool {
	return r.Start <= r2.Start && r.End >= r2.End
}

// Intersect returns a range consisting of the intersection between r and r2.
// If r and r2 do not overlap, Intersect returns a range with unspecified
// bounds, but for which Length() == 0.
//
//go:nosplit
func (r addrRange) Intersect(r2 addrRange) addrRange {
	if r.Start < r2.Start {
		r.Start = r2.Start
	}
	if r.End > r2.End {
		r.End = r2.End
	}
	if r.End < r.Start {
		r.End = r.Start
	}
	return r
}

// CanSplitAt returns true if it is legal to split a segment spanning the range
// r at x; that is, splitting at x would produce two ranges, both of which have
// non-zero length.
//
//go:nosplit
func (r addrRange) CanSplitAt(x uintptr) bool {
	return r.Contains(x) && r.Start < x
}
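An illustrative in-package calculation showing the half-open-interval semantics above; the address values are made up:

	// r = [0x1000, 0x3000), r2 = [0x2000, 0x4000)
	r := addrRange{Start: 0x1000, End: 0x3000}
	r2 := addrRange{Start: 0x2000, End: 0x4000}

	r.Overlaps(r2)       // true: 0x1000 < 0x4000 && 0x2000 < 0x3000
	r.Intersect(r2)      // addrRange{Start: 0x2000, End: 0x3000}
	r.Contains(0x3000)   // false: End is exclusive
	r.CanSplitAt(0x2000) // true: both halves would be non-empty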
1994
vendor/gvisor.dev/gvisor/pkg/state/addr_set.go
vendored
Normal file
File diff suppressed because it is too large
239
vendor/gvisor.dev/gvisor/pkg/state/complete_list.go
vendored
Normal file
@@ -0,0 +1,239 @@
package state

// ElementMapper provides an identity mapping by default.
//
// This can be replaced to provide a struct that maps elements to linker
// objects, if they are not the same. An ElementMapper is not typically
// required if: Linker is left as is, Element is left as is, or Linker and
// Element are the same type.
type completeElementMapper struct{}

// linkerFor maps an Element to a Linker.
//
// This default implementation should be inlined.
//
//go:nosplit
func (completeElementMapper) linkerFor(elem *objectDecodeState) *objectDecodeState { return elem }

// List is an intrusive list. Entries can be added to or removed from the list
// in O(1) time and with no additional memory allocations.
//
// The zero value for List is an empty list ready to use.
//
// To iterate over a list (where l is a List):
//
// for e := l.Front(); e != nil; e = e.Next() {
// 	// do something with e.
// }
//
// +stateify savable
type completeList struct {
	head *objectDecodeState
	tail *objectDecodeState
}

// Reset resets list l to the empty state.
func (l *completeList) Reset() {
	l.head = nil
	l.tail = nil
}

// Empty returns true iff the list is empty.
//
//go:nosplit
func (l *completeList) Empty() bool {
	return l.head == nil
}

// Front returns the first element of list l or nil.
//
//go:nosplit
func (l *completeList) Front() *objectDecodeState {
	return l.head
}

// Back returns the last element of list l or nil.
//
//go:nosplit
func (l *completeList) Back() *objectDecodeState {
	return l.tail
}

// Len returns the number of elements in the list.
//
// NOTE: This is an O(n) operation.
//
//go:nosplit
func (l *completeList) Len() (count int) {
	for e := l.Front(); e != nil; e = (completeElementMapper{}.linkerFor(e)).Next() {
		count++
	}
	return count
}

// PushFront inserts the element e at the front of list l.
//
//go:nosplit
func (l *completeList) PushFront(e *objectDecodeState) {
	linker := completeElementMapper{}.linkerFor(e)
	linker.SetNext(l.head)
	linker.SetPrev(nil)
	if l.head != nil {
		completeElementMapper{}.linkerFor(l.head).SetPrev(e)
	} else {
		l.tail = e
	}

	l.head = e
}

// PushFrontList inserts list m at the start of list l, emptying m.
//
//go:nosplit
func (l *completeList) PushFrontList(m *completeList) {
	if l.head == nil {
		l.head = m.head
		l.tail = m.tail
	} else if m.head != nil {
		completeElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
		completeElementMapper{}.linkerFor(m.tail).SetNext(l.head)

		l.head = m.head
	}
	m.head = nil
	m.tail = nil
}

// PushBack inserts the element e at the back of list l.
//
//go:nosplit
func (l *completeList) PushBack(e *objectDecodeState) {
	linker := completeElementMapper{}.linkerFor(e)
	linker.SetNext(nil)
	linker.SetPrev(l.tail)
	if l.tail != nil {
		completeElementMapper{}.linkerFor(l.tail).SetNext(e)
	} else {
		l.head = e
	}

	l.tail = e
}

// PushBackList inserts list m at the end of list l, emptying m.
//
//go:nosplit
func (l *completeList) PushBackList(m *completeList) {
	if l.head == nil {
		l.head = m.head
		l.tail = m.tail
	} else if m.head != nil {
		completeElementMapper{}.linkerFor(l.tail).SetNext(m.head)
		completeElementMapper{}.linkerFor(m.head).SetPrev(l.tail)

		l.tail = m.tail
	}
	m.head = nil
	m.tail = nil
}

// InsertAfter inserts e after b.
//
//go:nosplit
func (l *completeList) InsertAfter(b, e *objectDecodeState) {
	bLinker := completeElementMapper{}.linkerFor(b)
	eLinker := completeElementMapper{}.linkerFor(e)

	a := bLinker.Next()

	eLinker.SetNext(a)
	eLinker.SetPrev(b)
	bLinker.SetNext(e)

	if a != nil {
		completeElementMapper{}.linkerFor(a).SetPrev(e)
	} else {
		l.tail = e
	}
}

// InsertBefore inserts e before a.
//
//go:nosplit
func (l *completeList) InsertBefore(a, e *objectDecodeState) {
	aLinker := completeElementMapper{}.linkerFor(a)
	eLinker := completeElementMapper{}.linkerFor(e)

	b := aLinker.Prev()
	eLinker.SetNext(a)
	eLinker.SetPrev(b)
	aLinker.SetPrev(e)

	if b != nil {
		completeElementMapper{}.linkerFor(b).SetNext(e)
	} else {
		l.head = e
	}
}

// Remove removes e from l.
//
//go:nosplit
func (l *completeList) Remove(e *objectDecodeState) {
	linker := completeElementMapper{}.linkerFor(e)
	prev := linker.Prev()
	next := linker.Next()

	if prev != nil {
		completeElementMapper{}.linkerFor(prev).SetNext(next)
	} else if l.head == e {
		l.head = next
	}

	if next != nil {
		completeElementMapper{}.linkerFor(next).SetPrev(prev)
	} else if l.tail == e {
		l.tail = prev
	}

	linker.SetNext(nil)
	linker.SetPrev(nil)
}

// Entry is a default implementation of Linker. Users can add anonymous fields
// of this type to their structs to make them automatically implement the
// methods needed by List.
//
// +stateify savable
type completeEntry struct {
	next *objectDecodeState
	prev *objectDecodeState
}

// Next returns the entry that follows e in the list.
//
//go:nosplit
func (e *completeEntry) Next() *objectDecodeState {
	return e.next
}

// Prev returns the entry that precedes e in the list.
//
//go:nosplit
func (e *completeEntry) Prev() *objectDecodeState {
	return e.prev
}

// SetNext assigns 'entry' as the entry that follows e in the list.
//
//go:nosplit
func (e *completeEntry) SetNext(elem *objectDecodeState) {
	e.next = elem
}

// SetPrev assigns 'entry' as the entry that precedes e in the list.
//
//go:nosplit
func (e *completeEntry) SetPrev(elem *objectDecodeState) {
	e.prev = elem
}
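This file is a template instantiation specialized for *objectDecodeState, which embeds completeEntry (see decode.go below). The key property of the intrusive pattern is that the links live inside the elements, so insertion and removal never allocate. A minimal hand-written sketch of the same idea, with hypothetical names:

	// node carries its own links; the list stores no separate cells.
	type node struct {
		value      int
		next, prev *node
	}

	type list struct{ head, tail *node }

	// pushBack links n at the tail in O(1) with zero allocations.
	func (l *list) pushBack(n *node) {
		n.prev, n.next = l.tail, nil
		if l.tail != nil {
			l.tail.next = n
		} else {
			l.head = n
		}
		l.tail = n
	}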
737
vendor/gvisor.dev/gvisor/pkg/state/decode.go
vendored
Normal file
@@ -0,0 +1,737 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package state

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math"
	"reflect"

	"gvisor.dev/gvisor/pkg/state/wire"
)

// internalCallback is an interface called on object completion.
//
// There are two implementations: objectDecodeState & userCallback.
type internalCallback interface {
	// source returns the dependent object. May be nil.
	source() *objectDecodeState

	// callbackRun executes the callback.
	callbackRun()
}

// userCallback is an implementation of internalCallback.
type userCallback func()

// source implements internalCallback.source.
func (userCallback) source() *objectDecodeState {
	return nil
}

// callbackRun implements internalCallback.callbackRun.
func (uc userCallback) callbackRun() {
	uc()
}

// objectDecodeState represents an object that may be in the process of being
// decoded. Specifically, it represents either a decoded object, or an
// interest in a future object that will be decoded. When that interest is
// registered (via register), the storage for the object will be created, but
// it will not be decoded until the object is encountered in the stream.
type objectDecodeState struct {
	// id is the id for this object.
	id objectID

	// typ is the id for this typeID. This may be zero if this is not a
	// type-registered structure.
	typ typeID

	// obj is the object. This may or may not be valid yet, depending on
	// whether complete returns true. However, regardless of whether the
	// object is valid, obj contains a final storage location for the
	// object. This is immutable.
	//
	// Note that this must be addressable (obj.Addr() must not panic).
	//
	// The obj passed to the decode methods below will equal this obj only
	// in the case of decoding the top-level object. However, the passed
	// obj may represent individual fields, elements of a slice, etc. that
	// are effectively embedded within the reflect.Value below but with
	// distinct types.
	obj reflect.Value

	// blockedBy is the number of dependencies this object has.
	blockedBy int

	// callbacksInline is inline storage for callbacks.
	callbacksInline [2]internalCallback

	// callbacks is a set of callbacks to execute on load.
	callbacks []internalCallback

	completeEntry
}

// addCallback adds a callback to the objectDecodeState.
func (ods *objectDecodeState) addCallback(ic internalCallback) {
	if ods.callbacks == nil {
		ods.callbacks = ods.callbacksInline[:0]
	}
	ods.callbacks = append(ods.callbacks, ic)
}

// findCycleFor returns when the given object is found in the blocking set.
func (ods *objectDecodeState) findCycleFor(target *objectDecodeState) []*objectDecodeState {
	for _, ic := range ods.callbacks {
		other := ic.source()
		if other != nil && other == target {
			return []*objectDecodeState{target}
		} else if childList := other.findCycleFor(target); childList != nil {
			return append(childList, other)
		}
	}

	// This should not occur.
	Failf("no deadlock found?")
	panic("unreachable")
}

// findCycle finds a dependency cycle.
func (ods *objectDecodeState) findCycle() []*objectDecodeState {
	return append(ods.findCycleFor(ods), ods)
}

// source implements internalCallback.source.
func (ods *objectDecodeState) source() *objectDecodeState {
	return ods
}

// callbackRun implements internalCallback.callbackRun.
func (ods *objectDecodeState) callbackRun() {
	ods.blockedBy--
}

// decodeState is a graph of objects in the process of being decoded.
//
// The decode process involves loading the breadth-first graph generated by
// encode. This graph is read in its entirety, ensuring that all object
// storage is complete.
//
// As the graph is being serialized, a set of completion callbacks are
// executed. These completion callbacks should form a set of acyclic subgraphs
// over the original one. After decoding is complete, the objects are scanned
// to ensure that all callbacks are executed, otherwise the callback graph was
// not acyclic.
type decodeState struct {
	// ctx is the decode context.
	ctx context.Context

	// r is the input stream.
	r io.Reader

	// types is the type database.
	types typeDecodeDatabase

	// objectsByID is the set of objects in progress.
	objectsByID []*objectDecodeState

	// deferred are objects that have been read, but no interest has been
	// registered yet. These will be decoded once interest is registered.
	deferred map[objectID]wire.Object

	// pending is the set of objects that are not yet complete.
	pending completeList

	// stats tracks time data.
	stats Stats
}

// lookup looks up an object in decodeState or returns nil if no such object
// has been previously registered.
func (ds *decodeState) lookup(id objectID) *objectDecodeState {
	if len(ds.objectsByID) < int(id) {
		return nil
	}
	return ds.objectsByID[id-1]
}

// checkComplete checks for completion.
func (ds *decodeState) checkComplete(ods *objectDecodeState) bool {
	// Still blocked?
	if ods.blockedBy > 0 {
		return false
	}

	// Track stats if relevant.
	if ods.callbacks != nil && ods.typ != 0 {
		ds.stats.start(ods.typ)
		defer ds.stats.done()
	}

	// Fire all callbacks.
	for _, ic := range ods.callbacks {
		ic.callbackRun()
	}

	// Mark completed.
	cbs := ods.callbacks
	ods.callbacks = nil
	ds.pending.Remove(ods)

	// Recursively check others.
	for _, ic := range cbs {
		if other := ic.source(); other != nil && other.blockedBy == 0 {
			ds.checkComplete(other)
		}
	}

	return true // All set.
}

// wait registers a dependency on an object.
//
// As a special case, we always allow _useable_ references back to the first
// decoding object because it may have fields that are already decoded. We also
// allow trivial self references, since they can be handled internally.
func (ds *decodeState) wait(waiter *objectDecodeState, id objectID, callback func()) {
	switch id {
	case waiter.id:
		// Trivial self reference.
		fallthrough
	case 1:
		// Root object; see above.
		if callback != nil {
			callback()
		}
		return
	}

	// Mark as blocked.
	waiter.blockedBy++

	// No nil can be returned here.
	other := ds.lookup(id)
	if callback != nil {
		// Add the additional user callback.
		other.addCallback(userCallback(callback))
	}

	// Mark waiter as unblocked.
	other.addCallback(waiter)
}

// waitObject notes a blocking relationship.
func (ds *decodeState) waitObject(ods *objectDecodeState, encoded wire.Object, callback func()) {
	if rv, ok := encoded.(*wire.Ref); ok && rv.Root != 0 {
		// Refs can encode pointers and maps.
		ds.wait(ods, objectID(rv.Root), callback)
	} else if sv, ok := encoded.(*wire.Slice); ok && sv.Ref.Root != 0 {
		// See decodeObject; we need to wait for the array (if non-nil).
		ds.wait(ods, objectID(sv.Ref.Root), callback)
	} else if iv, ok := encoded.(*wire.Interface); ok {
		// It's an interface (wait recursively).
		ds.waitObject(ods, iv.Value, callback)
	} else if callback != nil {
		// Nothing to wait for: execute the callback immediately.
		callback()
	}
}

// walkChild returns a child object from obj, given an accessor path. This is
// the decode-side equivalent to traverse in encode.go.
//
// For the purposes of this function, a child object is either a field within a
// struct or an array element, with one such indirection per element in
// path. The returned value may be an unexported field, so it may not be
// directly assignable. See decode_unsafe.go.
func walkChild(path []wire.Dot, obj reflect.Value) reflect.Value {
	// See wire.Ref.Dots. The path here is specified in reverse order.
	for i := len(path) - 1; i >= 0; i-- {
		switch pc := path[i].(type) {
		case *wire.FieldName: // Must be a pointer.
			if obj.Kind() != reflect.Struct {
				Failf("next component in child path is a field name, but the current object is not a struct. Path: %v, current obj: %#v", path, obj)
			}
			obj = obj.FieldByName(string(*pc))
		case wire.Index: // Embedded.
			if obj.Kind() != reflect.Array {
				Failf("next component in child path is an array index, but the current object is not an array. Path: %v, current obj: %#v", path, obj)
			}
			obj = obj.Index(int(pc))
		default:
			panic("unreachable: switch should be exhaustive")
		}
	}
	return obj
}
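To make the reversed-path convention concrete, here is a purely illustrative resolution; the field names are hypothetical and only the *wire.FieldName / wire.Index cases handled above are used:

	// Suppose a reference targets obj.Fields[3].Name. Per wire.Ref.Dots,
	// the path is stored in reverse, so walkChild applies it back-to-front.
	fields := wire.FieldName("Fields")
	name := wire.FieldName("Name")
	path := []wire.Dot{&name, wire.Index(3), &fields}

	// walkChild(path, obj) is then equivalent to:
	//   obj.FieldByName("Fields").Index(3).FieldByName("Name")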
|
||||
// register registers a decode with a type.
|
||||
//
|
||||
// This type is only used to instantiate a new object if it has not been
|
||||
// registered previously. This depends on the type provided if none is
|
||||
// available in the object itself.
|
||||
func (ds *decodeState) register(r *wire.Ref, typ reflect.Type) reflect.Value {
|
||||
// Grow the objectsByID slice.
|
||||
id := objectID(r.Root)
|
||||
if len(ds.objectsByID) < int(id) {
|
||||
ds.objectsByID = append(ds.objectsByID, make([]*objectDecodeState, int(id)-len(ds.objectsByID))...)
|
||||
}
|
||||
|
||||
// Does this object already exist?
|
||||
ods := ds.objectsByID[id-1]
|
||||
if ods != nil {
|
||||
return walkChild(r.Dots, ods.obj)
|
||||
}
|
||||
|
||||
// Create the object.
|
||||
if len(r.Dots) != 0 {
|
||||
typ = ds.findType(r.Type)
|
||||
}
|
||||
v := reflect.New(typ)
|
||||
ods = &objectDecodeState{
|
||||
id: id,
|
||||
obj: v.Elem(),
|
||||
}
|
||||
ds.objectsByID[id-1] = ods
|
||||
ds.pending.PushBack(ods)
|
||||
|
||||
// Process any deferred objects & callbacks.
|
||||
if encoded, ok := ds.deferred[id]; ok {
|
||||
delete(ds.deferred, id)
|
||||
ds.decodeObject(ods, ods.obj, encoded)
|
||||
}
|
||||
|
||||
return walkChild(r.Dots, ods.obj)
|
||||
}
|
||||
|
||||
// objectDecoder is for decoding structs.
|
||||
type objectDecoder struct {
|
||||
// ds is the decodeState.
|
||||
ds *decodeState
|
||||
|
||||
// ods is the current object being decoded.
|
||||
ods *objectDecodeState
|
||||
|
||||
// rte is the reconciled type information.
|
||||
rte *reconciledTypeEntry
|
||||
|
||||
// encoded is the encoded object state.
|
||||
encoded *wire.Struct
|
||||
}
|
||||
|
||||
// load is a helper for the public methods on Source.
|
||||
func (od *objectDecoder) load(slot int, objPtr reflect.Value, wait bool, fn func()) {
|
||||
// Note that we have reconciled the type and may remap the fields here
|
||||
// to match what's expected by the decoder. The "slot" parameter here
|
||||
// is in terms of the local type, where the fields in the encoded
|
||||
// object are in terms of the wire object's type, which might be in a
|
||||
// different order (but will have the same fields).
|
||||
v := *od.encoded.Field(od.rte.FieldOrder[slot])
|
||||
od.ds.decodeObject(od.ods, objPtr.Elem(), v)
|
||||
if wait {
|
||||
// Mark this individual object as a blocker.
|
||||
od.ds.waitObject(od.ods, v, fn)
|
||||
}
|
||||
}
|
||||
|
||||
// afterLoad implements Source.AfterLoad.
|
||||
func (od *objectDecoder) afterLoad(fn func()) {
|
||||
// Queue the local callback; this will execute when all of the above
|
||||
// data dependencies have been cleared.
|
||||
od.ods.addCallback(userCallback(fn))
|
||||
}
|
||||
|
||||
// decodeStruct decodes a struct value.
|
||||
func (ds *decodeState) decodeStruct(ods *objectDecodeState, obj reflect.Value, encoded *wire.Struct) {
|
||||
if encoded.TypeID == 0 {
|
||||
// Allow anonymous empty structs, but only if the encoded
|
||||
// object also has no fields.
|
||||
if encoded.Fields() == 0 && obj.NumField() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Propagate an error.
|
||||
Failf("empty struct on wire %#v has field mismatch with type %q", encoded, obj.Type().Name())
|
||||
}
|
||||
|
||||
// Look up the object type.
|
||||
rte := ds.types.Lookup(typeID(encoded.TypeID), obj.Type())
|
||||
ods.typ = typeID(encoded.TypeID)
|
||||
|
||||
// Invoke the loader.
|
||||
od := objectDecoder{
|
||||
ds: ds,
|
||||
ods: ods,
|
||||
rte: rte,
|
||||
encoded: encoded,
|
||||
}
|
||||
ds.stats.start(ods.typ)
|
||||
defer ds.stats.done()
|
||||
if sl, ok := obj.Addr().Interface().(SaverLoader); ok {
|
||||
// Note: may be a registered empty struct which does not
|
||||
// implement the saver/loader interfaces.
|
||||
sl.StateLoad(ds.ctx, Source{internal: od})
|
||||
}
|
||||
}
|
||||
|
||||
// decodeMap decodes a map value.
|
||||
func (ds *decodeState) decodeMap(ods *objectDecodeState, obj reflect.Value, encoded *wire.Map) {
|
||||
if obj.IsNil() {
|
||||
// See pointerTo.
|
||||
obj.Set(reflect.MakeMap(obj.Type()))
|
||||
}
|
||||
for i := 0; i < len(encoded.Keys); i++ {
|
||||
// Decode the objects.
|
||||
kv := reflect.New(obj.Type().Key()).Elem()
|
||||
vv := reflect.New(obj.Type().Elem()).Elem()
|
||||
ds.decodeObject(ods, kv, encoded.Keys[i])
|
||||
ds.decodeObject(ods, vv, encoded.Values[i])
|
||||
ds.waitObject(ods, encoded.Keys[i], nil)
|
||||
ds.waitObject(ods, encoded.Values[i], nil)
|
||||
|
||||
// Set in the map.
|
||||
obj.SetMapIndex(kv, vv)
|
||||
}
|
||||
}
|
||||
|
||||
// decodeArray decodes an array value.
|
||||
func (ds *decodeState) decodeArray(ods *objectDecodeState, obj reflect.Value, encoded *wire.Array) {
|
||||
if len(encoded.Contents) != obj.Len() {
|
||||
Failf("mismatching array length expect=%d, actual=%d", obj.Len(), len(encoded.Contents))
|
||||
}
|
||||
// Decode the contents into the array.
|
||||
for i := 0; i < len(encoded.Contents); i++ {
|
||||
ds.decodeObject(ods, obj.Index(i), encoded.Contents[i])
|
||||
ds.waitObject(ods, encoded.Contents[i], nil)
|
||||
}
|
||||
}
|
||||
|
||||
// findType finds the type for the given wire.TypeSpec.
|
||||
func (ds *decodeState) findType(t wire.TypeSpec) reflect.Type {
|
||||
switch x := t.(type) {
|
||||
case wire.TypeID:
|
||||
typ := ds.types.LookupType(typeID(x))
|
||||
rte := ds.types.Lookup(typeID(x), typ)
|
||||
return rte.LocalType
|
||||
case *wire.TypeSpecPointer:
|
||||
return reflect.PtrTo(ds.findType(x.Type))
|
||||
case *wire.TypeSpecArray:
|
||||
return reflect.ArrayOf(int(x.Count), ds.findType(x.Type))
|
||||
case *wire.TypeSpecSlice:
|
||||
return reflect.SliceOf(ds.findType(x.Type))
|
||||
case *wire.TypeSpecMap:
|
||||
return reflect.MapOf(ds.findType(x.Key), ds.findType(x.Value))
|
||||
default:
|
||||
// Should not happen.
|
||||
Failf("unknown type %#v", t)
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// decodeInterface decodes an interface value.
|
||||
func (ds *decodeState) decodeInterface(ods *objectDecodeState, obj reflect.Value, encoded *wire.Interface) {
|
||||
if _, ok := encoded.Type.(wire.TypeSpecNil); ok {
|
||||
// Special case; the nil object. Just decode directly, which
|
||||
// will read nil from the wire (if encoded correctly).
|
||||
ds.decodeObject(ods, obj, encoded.Value)
|
||||
return
|
||||
}
|
||||
|
||||
// We now need to resolve the actual type.
|
||||
typ := ds.findType(encoded.Type)
|
||||
|
||||
// We need to imbue type information here, then we can proceed to
|
||||
// decode normally. In order to avoid issues with setting value-types,
|
||||
// we create a new non-interface version of this object. We will then
|
||||
// set the interface object to be equal to whatever we decode.
|
||||
origObj := obj
|
||||
obj = reflect.New(typ).Elem()
|
||||
defer origObj.Set(obj)
|
||||
|
||||
// With the object now having sufficient type information to actually
|
||||
// have Set called on it, we can proceed to decode the value.
|
||||
ds.decodeObject(ods, obj, encoded.Value)
|
||||
}
|
||||
|
||||
// isFloatEq determines if x and y represent the same value.
|
||||
func isFloatEq(x float64, y float64) bool {
|
||||
switch {
|
||||
case math.IsNaN(x):
|
||||
return math.IsNaN(y)
|
||||
case math.IsInf(x, 1):
|
||||
return math.IsInf(y, 1)
|
||||
case math.IsInf(x, -1):
|
||||
return math.IsInf(y, -1)
|
||||
default:
|
||||
return x == y
|
||||
}
|
||||
}
|
||||
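// Illustrative note (not part of the original file): the special cases above
// matter because the built-in == operator reports NaN != NaN, which would
// make decodeObject flag every decoded NaN as a truncation error:
//
//	isFloatEq(math.NaN(), math.NaN())   // true
//	math.NaN() == math.NaN()            // false
//	isFloatEq(math.Inf(1), math.Inf(1)) // true
//	isFloatEq(1.0, 1.0)                 // true (falls through to ==)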
|
||||
// isComplexEq determines if x and y represent the same value.
|
||||
func isComplexEq(x complex128, y complex128) bool {
|
||||
return isFloatEq(real(x), real(y)) && isFloatEq(imag(x), imag(y))
|
||||
}
|
||||
|
||||
// decodeObject decodes an object value.
|
||||
func (ds *decodeState) decodeObject(ods *objectDecodeState, obj reflect.Value, encoded wire.Object) {
|
||||
switch x := encoded.(type) {
|
||||
case wire.Nil: // Fast path: first.
|
||||
// We leave obj alone here. That's because if obj represents an
|
||||
// interface, it may have been imbued with type information in
|
||||
// decodeInterface, and we don't want to destroy that.
|
||||
case *wire.Ref:
|
||||
// Nil pointers may be encoded in a "forceValue" context. For
|
||||
// those we just leave it alone as the value will already be
|
||||
// correct (nil).
|
||||
if id := objectID(x.Root); id == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Note that if this is a map type, we go through a level of
|
||||
// indirection to allow for map aliasing.
|
||||
if obj.Kind() == reflect.Map {
|
||||
v := ds.register(x, obj.Type())
|
||||
if v.IsNil() {
|
||||
// Note that we don't want to clobber the map
// if it has already been decoded by decodeMap. We
// just make it so that we have a consistent
// reference when that eventually does happen.
|
||||
v.Set(reflect.MakeMap(v.Type()))
|
||||
}
|
||||
obj.Set(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Normal assignment: authoritative only if no dots.
|
||||
v := ds.register(x, obj.Type().Elem())
|
||||
obj.Set(reflectValueRWAddr(v))
|
||||
case wire.Bool:
|
||||
obj.SetBool(bool(x))
|
||||
case wire.Int:
|
||||
obj.SetInt(int64(x))
|
||||
if obj.Int() != int64(x) {
|
||||
Failf("signed integer truncated from %v to %v", int64(x), obj.Int())
|
||||
}
|
||||
case wire.Uint:
|
||||
obj.SetUint(uint64(x))
|
||||
if obj.Uint() != uint64(x) {
|
||||
Failf("unsigned integer truncated from %v to %v", uint64(x), obj.Uint())
|
||||
}
|
||||
case wire.Float32:
|
||||
obj.SetFloat(float64(x))
|
||||
case wire.Float64:
|
||||
obj.SetFloat(float64(x))
|
||||
if !isFloatEq(obj.Float(), float64(x)) {
|
||||
Failf("floating point number truncated from %v to %v", float64(x), obj.Float())
|
||||
}
|
||||
case *wire.Complex64:
|
||||
obj.SetComplex(complex128(*x))
|
||||
case *wire.Complex128:
|
||||
obj.SetComplex(complex128(*x))
|
||||
if !isComplexEq(obj.Complex(), complex128(*x)) {
|
||||
Failf("complex number truncated from %v to %v", complex128(*x), obj.Complex())
|
||||
}
|
||||
case *wire.String:
|
||||
obj.SetString(string(*x))
|
||||
case *wire.Slice:
|
||||
// See *wire.Ref above; same applies.
|
||||
if id := objectID(x.Ref.Root); id == 0 {
|
||||
return
|
||||
}
|
||||
// Note that it's fine to slice the array here and assume that
|
||||
// contents will still be filled in later on.
|
||||
typ := reflect.ArrayOf(int(x.Capacity), obj.Type().Elem()) // The object type.
|
||||
v := ds.register(&x.Ref, typ)
|
||||
obj.Set(reflectValueRWSlice3(v, 0, int(x.Length), int(x.Capacity)))
|
||||
case *wire.Array:
|
||||
ds.decodeArray(ods, obj, x)
|
||||
case *wire.Struct:
|
||||
ds.decodeStruct(ods, obj, x)
|
||||
case *wire.Map:
|
||||
ds.decodeMap(ods, obj, x)
|
||||
case *wire.Interface:
|
||||
ds.decodeInterface(ods, obj, x)
|
||||
default:
|
||||
// Should not happen, not propagated as an error.
|
||||
Failf("unknown object %#v for %q", encoded, obj.Type().Name())
|
||||
}
|
||||
}
|
||||
|
||||
// Load deserializes the object graph rooted at obj.
|
||||
//
|
||||
// This function may panic and should be run in safely().
|
||||
func (ds *decodeState) Load(obj reflect.Value) {
|
||||
ds.stats.init()
|
||||
defer ds.stats.fini(func(id typeID) string {
|
||||
return ds.types.LookupName(id)
|
||||
})
|
||||
|
||||
// Create the root object.
|
||||
rootOds := &objectDecodeState{
|
||||
id: 1,
|
||||
obj: obj,
|
||||
}
|
||||
ds.objectsByID = append(ds.objectsByID, rootOds)
|
||||
ds.pending.PushBack(rootOds)
|
||||
|
||||
// Read the number of objects.
|
||||
numObjects, object, err := ReadHeader(ds.r)
|
||||
if err != nil {
|
||||
Failf("header error: %w", err)
|
||||
}
|
||||
if !object {
|
||||
Failf("object missing")
|
||||
}
|
||||
|
||||
// Decode all objects.
|
||||
var (
|
||||
encoded wire.Object
|
||||
ods *objectDecodeState
|
||||
id objectID
|
||||
tid = typeID(1)
|
||||
)
|
||||
if err := safely(func() {
|
||||
// Decode all objects in the stream.
|
||||
//
|
||||
// Note that the structure of this decoding loop should match the raw
|
||||
// decoding loop in state/pretty/pretty.printer.printStream().
|
||||
for i := uint64(0); i < numObjects; {
|
||||
// Unmarshal either a type object or object ID.
|
||||
encoded = wire.Load(ds.r)
|
||||
switch we := encoded.(type) {
|
||||
case *wire.Type:
|
||||
ds.types.Register(we)
|
||||
tid++
|
||||
encoded = nil
|
||||
continue
|
||||
case wire.Uint:
|
||||
id = objectID(we)
|
||||
i++
|
||||
// Unmarshal and resolve the actual object.
|
||||
encoded = wire.Load(ds.r)
|
||||
ods = ds.lookup(id)
|
||||
if ods != nil {
|
||||
// Decode the object.
|
||||
ds.decodeObject(ods, ods.obj, encoded)
|
||||
} else {
|
||||
// If an object hasn't had interest registered
// previously or isn't yet valid, we defer
// decoding until interest is registered.
|
||||
ds.deferred[id] = encoded
|
||||
}
|
||||
// For error handling.
|
||||
ods = nil
|
||||
encoded = nil
|
||||
default:
|
||||
Failf("wanted type or object ID, got %T", encoded)
|
||||
}
|
||||
}
|
||||
}); err != nil {
|
||||
// Include as much information as we can, taking into account
|
||||
// the possible state transitions above.
|
||||
if ods != nil {
|
||||
Failf("error decoding object ID %d (%T) from %#v: %w", id, ods.obj.Interface(), encoded, err)
|
||||
} else if encoded != nil {
|
||||
Failf("error decoding from %#v: %w", encoded, err)
|
||||
} else {
|
||||
Failf("general decoding error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have any deferred objects.
|
||||
numDeferred := 0
|
||||
for id, encoded := range ds.deferred {
|
||||
numDeferred++
|
||||
if s, ok := encoded.(*wire.Struct); ok && s.TypeID != 0 {
|
||||
typ := ds.types.LookupType(typeID(s.TypeID))
|
||||
Failf("unused deferred object: ID %d, type %v", id, typ)
|
||||
} else {
|
||||
Failf("unused deferred object: ID %d, %#v", id, encoded)
|
||||
}
|
||||
}
|
||||
if numDeferred != 0 {
|
||||
Failf("still had %d deferred objects", numDeferred)
|
||||
}
|
||||
|
||||
// Scan and fire all callbacks. We iterate over the list of incomplete
|
||||
// objects until all have been finished. We stop iterating if no
|
||||
// objects become complete (there is a dependency cycle).
|
||||
//
|
||||
// Note that we iterate backwards here, because there will be a strong
|
||||
// tendency for blocking relationships to go from earlier objects to
|
||||
// later (deeper) objects in the graph. This will reduce the number of
|
||||
// iterations required to finish all objects.
|
||||
if err := safely(func() {
|
||||
for ds.pending.Back() != nil {
|
||||
thisCycle := false
|
||||
for ods = ds.pending.Back(); ods != nil; {
|
||||
if ds.checkComplete(ods) {
|
||||
thisCycle = true
|
||||
break
|
||||
}
|
||||
ods = ods.Prev()
|
||||
}
|
||||
if !thisCycle {
|
||||
break
|
||||
}
|
||||
}
|
||||
}); err != nil {
|
||||
Failf("error executing callbacks: %w\nfor object %#v", err, ods.obj.Interface())
|
||||
}
|
||||
|
||||
// Check if we have any remaining dependency cycles. If there are any
|
||||
// objects left in the pending list, then it must be due to a cycle.
|
||||
if ods := ds.pending.Front(); ods != nil {
|
||||
// This must be the result of a dependency cycle.
|
||||
cycle := ods.findCycle()
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("dependency cycle: {")
|
||||
for i, cycleOS := range cycle {
|
||||
if i > 0 {
|
||||
buf.WriteString(" => ")
|
||||
}
|
||||
fmt.Fprintf(&buf, "%q", cycleOS.obj.Type())
|
||||
}
|
||||
buf.WriteString("}")
|
||||
Failf("incomplete graph: %s", string(buf.Bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
// ReadHeader reads an object header.
|
||||
//
|
||||
// Each object written to the statefile is prefixed with a header. See
|
||||
// WriteHeader for more information; these functions are exported to allow
|
||||
// non-state writes to the file to play nice with debugging tools.
|
||||
func ReadHeader(r io.Reader) (length uint64, object bool, err error) {
|
||||
// Read the header.
|
||||
err = safely(func() {
|
||||
length = wire.LoadUint(r)
|
||||
})
|
||||
if err != nil {
|
||||
// On the header, pass raw I/O errors.
|
||||
if sErr, ok := err.(*ErrState); ok {
|
||||
return 0, false, sErr.Unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
// Decode whether the object is valid.
|
||||
object = length&objectFlag != 0
|
||||
length &^= objectFlag
|
||||
return
|
||||
}
|
||||
76
vendor/gvisor.dev/gvisor/pkg/state/decode_unsafe.go
vendored
Normal file
76
vendor/gvisor.dev/gvisor/pkg/state/decode_unsafe.go
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// Copyright 2020 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// reflectValueRWAddr is equivalent to obj.Addr(), except that the returned
|
||||
// reflect.Value is usable in assignments even if obj was obtained by the use
|
||||
// of unexported struct fields.
|
||||
//
|
||||
// Preconditions: obj.CanAddr().
|
||||
func reflectValueRWAddr(obj reflect.Value) reflect.Value {
|
||||
return reflect.NewAt(obj.Type(), unsafe.Pointer(obj.UnsafeAddr()))
|
||||
}
|
||||
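// Illustrative sketch (assumed types, not part of the original file):
//
//	type outer struct{ hidden uint64 }
//	var o outer
//	f := reflect.ValueOf(&o).Elem().Field(0) // f.CanSet() == false (unexported).
//	p := reflectValueRWAddr(f)               // reflect.Value of a *uint64.
//	p.Elem().SetUint(1)                      // Writes o.hidden without panicking.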
|
||||
// reflectValueRWSlice3 is equivalent to arr.Slice3(i, j, k), except that the
|
||||
// returned reflect.Value is usable in assignments even if obj was obtained by
|
||||
// the use of unexported struct fields.
|
||||
//
|
||||
// Preconditions:
|
||||
// - arr.Kind() == reflect.Array.
|
||||
// - i, j, k >= 0.
|
||||
// - i <= j <= k <= arr.Len().
|
||||
func reflectValueRWSlice3(arr reflect.Value, i, j, k int) reflect.Value {
|
||||
if arr.Kind() != reflect.Array {
|
||||
panic(fmt.Sprintf("arr has kind %v, wanted %v", arr.Kind(), reflect.Array))
|
||||
}
|
||||
if i < 0 || j < 0 || k < 0 {
|
||||
panic(fmt.Sprintf("negative subscripts (%d, %d, %d)", i, j, k))
|
||||
}
|
||||
if i > j {
|
||||
panic(fmt.Sprintf("subscript i (%d) > j (%d)", i, j))
|
||||
}
|
||||
if j > k {
|
||||
panic(fmt.Sprintf("subscript j (%d) > k (%d)", j, k))
|
||||
}
|
||||
if k > arr.Len() {
|
||||
panic(fmt.Sprintf("subscript k (%d) > array length (%d)", k, arr.Len()))
|
||||
}
|
||||
|
||||
sliceTyp := reflect.SliceOf(arr.Type().Elem())
|
||||
if i == arr.Len() {
|
||||
// By precondition, i == j == k == arr.Len().
|
||||
return reflect.MakeSlice(sliceTyp, 0, 0)
|
||||
}
|
||||
slh := reflect.SliceHeader{
|
||||
// reflect.Value.CanAddr() == false for arrays, so we need to get the
|
||||
// address from the first element of the array.
|
||||
Data: arr.Index(i).UnsafeAddr(),
|
||||
Len: j - i,
|
||||
Cap: k - i,
|
||||
}
|
||||
slobj := reflect.NewAt(sliceTyp, unsafe.Pointer(&slh)).Elem()
|
||||
// Before slobj is constructed, arr holds the only pointer-typed pointer to
|
||||
// the array since reflect.SliceHeader.Data is a uintptr, so arr must be
|
||||
// kept alive.
|
||||
runtime.KeepAlive(arr)
|
||||
return slobj
|
||||
}
|
||||
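// Illustrative note (not part of the original file): for an addressable
// [4]int value arr, reflectValueRWSlice3(arr, 1, 3, 4) behaves like the
// expression arr[1:3:4] — a slice with Len() == 2 and Cap() == 3 that shares
// the array's storage — but remains assignable even when arr was reached
// through unexported fields.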
239
vendor/gvisor.dev/gvisor/pkg/state/deferred_list.go
vendored
Normal file
239
vendor/gvisor.dev/gvisor/pkg/state/deferred_list.go
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
package state
|
||||
|
||||
// ElementMapper provides an identity mapping by default.
|
||||
//
|
||||
// This can be replaced to provide a struct that maps elements to linker
|
||||
// objects, if they are not the same. An ElementMapper is not typically
|
||||
// required if: Linker is left as is, Element is left as is, or Linker and
|
||||
// Element are the same type.
|
||||
type deferredElementMapper struct{}
|
||||
|
||||
// linkerFor maps an Element to a Linker.
|
||||
//
|
||||
// This default implementation should be inlined.
|
||||
//
|
||||
//go:nosplit
|
||||
func (deferredElementMapper) linkerFor(elem *objectEncodeState) *objectEncodeState { return elem }
|
||||
|
||||
// List is an intrusive list. Entries can be added to or removed from the list
|
||||
// in O(1) time and with no additional memory allocations.
|
||||
//
|
||||
// The zero value for List is an empty list ready to use.
|
||||
//
|
||||
// To iterate over a list (where l is a List):
|
||||
//
|
||||
// for e := l.Front(); e != nil; e = e.Next() {
|
||||
// // do something with e.
|
||||
// }
|
||||
//
|
||||
// +stateify savable
|
||||
type deferredList struct {
|
||||
head *objectEncodeState
|
||||
tail *objectEncodeState
|
||||
}
|
||||
|
||||
// Reset resets list l to the empty state.
|
||||
func (l *deferredList) Reset() {
|
||||
l.head = nil
|
||||
l.tail = nil
|
||||
}
|
||||
|
||||
// Empty returns true iff the list is empty.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) Empty() bool {
|
||||
return l.head == nil
|
||||
}
|
||||
|
||||
// Front returns the first element of list l or nil.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) Front() *objectEncodeState {
|
||||
return l.head
|
||||
}
|
||||
|
||||
// Back returns the last element of list l or nil.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) Back() *objectEncodeState {
|
||||
return l.tail
|
||||
}
|
||||
|
||||
// Len returns the number of elements in the list.
|
||||
//
|
||||
// NOTE: This is an O(n) operation.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) Len() (count int) {
|
||||
for e := l.Front(); e != nil; e = (deferredElementMapper{}.linkerFor(e)).Next() {
|
||||
count++
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// PushFront inserts the element e at the front of list l.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) PushFront(e *objectEncodeState) {
|
||||
linker := deferredElementMapper{}.linkerFor(e)
|
||||
linker.SetNext(l.head)
|
||||
linker.SetPrev(nil)
|
||||
if l.head != nil {
|
||||
deferredElementMapper{}.linkerFor(l.head).SetPrev(e)
|
||||
} else {
|
||||
l.tail = e
|
||||
}
|
||||
|
||||
l.head = e
|
||||
}
|
||||
|
||||
// PushFrontList inserts list m at the start of list l, emptying m.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) PushFrontList(m *deferredList) {
|
||||
if l.head == nil {
|
||||
l.head = m.head
|
||||
l.tail = m.tail
|
||||
} else if m.head != nil {
|
||||
deferredElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
|
||||
deferredElementMapper{}.linkerFor(m.tail).SetNext(l.head)
|
||||
|
||||
l.head = m.head
|
||||
}
|
||||
m.head = nil
|
||||
m.tail = nil
|
||||
}
|
||||
|
||||
// PushBack inserts the element e at the back of list l.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) PushBack(e *objectEncodeState) {
|
||||
linker := deferredElementMapper{}.linkerFor(e)
|
||||
linker.SetNext(nil)
|
||||
linker.SetPrev(l.tail)
|
||||
if l.tail != nil {
|
||||
deferredElementMapper{}.linkerFor(l.tail).SetNext(e)
|
||||
} else {
|
||||
l.head = e
|
||||
}
|
||||
|
||||
l.tail = e
|
||||
}
|
||||
|
||||
// PushBackList inserts list m at the end of list l, emptying m.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) PushBackList(m *deferredList) {
|
||||
if l.head == nil {
|
||||
l.head = m.head
|
||||
l.tail = m.tail
|
||||
} else if m.head != nil {
|
||||
deferredElementMapper{}.linkerFor(l.tail).SetNext(m.head)
|
||||
deferredElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
|
||||
|
||||
l.tail = m.tail
|
||||
}
|
||||
m.head = nil
|
||||
m.tail = nil
|
||||
}
|
||||
|
||||
// InsertAfter inserts e after b.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) InsertAfter(b, e *objectEncodeState) {
|
||||
bLinker := deferredElementMapper{}.linkerFor(b)
|
||||
eLinker := deferredElementMapper{}.linkerFor(e)
|
||||
|
||||
a := bLinker.Next()
|
||||
|
||||
eLinker.SetNext(a)
|
||||
eLinker.SetPrev(b)
|
||||
bLinker.SetNext(e)
|
||||
|
||||
if a != nil {
|
||||
deferredElementMapper{}.linkerFor(a).SetPrev(e)
|
||||
} else {
|
||||
l.tail = e
|
||||
}
|
||||
}
|
||||
|
||||
// InsertBefore inserts e before a.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) InsertBefore(a, e *objectEncodeState) {
|
||||
aLinker := deferredElementMapper{}.linkerFor(a)
|
||||
eLinker := deferredElementMapper{}.linkerFor(e)
|
||||
|
||||
b := aLinker.Prev()
|
||||
eLinker.SetNext(a)
|
||||
eLinker.SetPrev(b)
|
||||
aLinker.SetPrev(e)
|
||||
|
||||
if b != nil {
|
||||
deferredElementMapper{}.linkerFor(b).SetNext(e)
|
||||
} else {
|
||||
l.head = e
|
||||
}
|
||||
}
|
||||
|
||||
// Remove removes e from l.
|
||||
//
|
||||
//go:nosplit
|
||||
func (l *deferredList) Remove(e *objectEncodeState) {
|
||||
linker := deferredElementMapper{}.linkerFor(e)
|
||||
prev := linker.Prev()
|
||||
next := linker.Next()
|
||||
|
||||
if prev != nil {
|
||||
deferredElementMapper{}.linkerFor(prev).SetNext(next)
|
||||
} else if l.head == e {
|
||||
l.head = next
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
deferredElementMapper{}.linkerFor(next).SetPrev(prev)
|
||||
} else if l.tail == e {
|
||||
l.tail = prev
|
||||
}
|
||||
|
||||
linker.SetNext(nil)
|
||||
linker.SetPrev(nil)
|
||||
}
|
||||
|
||||
// Entry is a default implementation of Linker. Users can add anonymous fields
|
||||
// of this type to their structs to make them automatically implement the
|
||||
// methods needed by List.
|
||||
//
|
||||
// +stateify savable
|
||||
type deferredEntry struct {
|
||||
next *objectEncodeState
|
||||
prev *objectEncodeState
|
||||
}
|
||||
|
||||
// Next returns the entry that follows e in the list.
|
||||
//
|
||||
//go:nosplit
|
||||
func (e *deferredEntry) Next() *objectEncodeState {
|
||||
return e.next
|
||||
}
|
||||
|
||||
// Prev returns the entry that precedes e in the list.
|
||||
//
|
||||
//go:nosplit
|
||||
func (e *deferredEntry) Prev() *objectEncodeState {
|
||||
return e.prev
|
||||
}
|
||||
|
||||
// SetNext assigns 'elem' as the entry that follows e in the list.
|
||||
//
|
||||
//go:nosplit
|
||||
func (e *deferredEntry) SetNext(elem *objectEncodeState) {
|
||||
e.next = elem
|
||||
}
|
||||
|
||||
// SetPrev assigns 'elem' as the entry that precedes e in the list.
|
||||
//
|
||||
//go:nosplit
|
||||
func (e *deferredEntry) SetPrev(elem *objectEncodeState) {
|
||||
e.prev = elem
|
||||
}
|
||||
874
vendor/gvisor.dev/gvisor/pkg/state/encode.go
vendored
Normal file
874
vendor/gvisor.dev/gvisor/pkg/state/encode.go
vendored
Normal file
@@ -0,0 +1,874 @@
|
||||
// Copyright 2018 The gVisor Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/state/wire"
|
||||
)
|
||||
|
||||
// objectEncodeState describes the type and identity of an object occupying a memory
|
||||
// address range. This is the value type for addrSet, and the intrusive entry
|
||||
// for the deferred list.
|
||||
type objectEncodeState struct {
|
||||
// id is the assigned ID for this object.
|
||||
id objectID
|
||||
|
||||
// obj is the object value. Note that this may be replaced if we
|
||||
// encounter an object that contains this object. When this happens (in
|
||||
// resolve), we will update existing references appropriately, below,
|
||||
// and defer a re-encoding of the object.
|
||||
obj reflect.Value
|
||||
|
||||
// encoded is the encoded value of this object. Note that this may not
|
||||
// be up to date if this object is still in the deferred list.
|
||||
encoded wire.Object
|
||||
|
||||
// how indicates whether this object should be encoded as a value. This
|
||||
// is used only for deferred encoding.
|
||||
how encodeStrategy
|
||||
|
||||
// refs are the list of reference objects used by other objects
|
||||
// referring to this object. When the object is updated, these
|
||||
// references may be updated directly and automatically.
|
||||
refs []*wire.Ref
|
||||
|
||||
deferredEntry
|
||||
}
|
||||
|
||||
// encodeState is state used for encoding.
|
||||
//
|
||||
// The encoding process constructs a representation of the in-memory graph of
|
||||
// objects before a single object is serialized. This is done to ensure that
|
||||
// all references can be fully disambiguated. See resolve for more details.
|
||||
type encodeState struct {
|
||||
// ctx is the encode context.
|
||||
ctx context.Context
|
||||
|
||||
// w is the output stream.
|
||||
w io.Writer
|
||||
|
||||
// types is the type database.
|
||||
types typeEncodeDatabase
|
||||
|
||||
// lastID is the last allocated object ID.
|
||||
lastID objectID
|
||||
|
||||
// values tracks the address ranges occupied by objects, along with the
|
||||
// types of these objects. This is used to locate pointer targets,
|
||||
// including pointers to fields within another type.
|
||||
//
|
||||
// Multiple objects may overlap in memory iff the larger object fully
|
||||
// contains the smaller one, and the type of the smaller object matches
|
||||
// a field or array element's type at the appropriate offset. An
|
||||
// arbitrary number of objects may be nested in this manner.
|
||||
//
|
||||
// Note that this does not track zero-sized objects, those are tracked
|
||||
// by zeroValues below.
|
||||
values addrSet
|
||||
|
||||
// zeroValues tracks zero-sized objects.
|
||||
zeroValues map[reflect.Type]*objectEncodeState
|
||||
|
||||
// deferred is the list of objects to be encoded.
|
||||
deferred deferredList
|
||||
|
||||
// pendingTypes is the list of types to be serialized. Serialization
|
||||
// will occur when all objects have been encoded, but before pending is
|
||||
// serialized.
|
||||
pendingTypes []wire.Type
|
||||
|
||||
// pending maps object IDs to objects to be serialized. Serialization does
|
||||
// not actually occur until the full object graph is computed.
|
||||
pending map[objectID]*objectEncodeState
|
||||
|
||||
// encodedStructs maps reflect.Values representing structs to previous
|
||||
// encodings of those structs. This is necessary to avoid duplicate calls
|
||||
// to SaverLoader.StateSave() that may result in multiple calls to
|
||||
// Sink.SaveValue() for a given field, resulting in object duplication.
|
||||
encodedStructs map[reflect.Value]*wire.Struct
|
||||
|
||||
// stats tracks time data.
|
||||
stats Stats
|
||||
}
|
||||
|
||||
// isSameSizeParent returns true if child is a field value or element within
|
||||
// parent. Only a struct or array can have a child value.
|
||||
//
|
||||
// isSameSizeParent deals with objects like this:
|
||||
//
|
||||
// struct child {
|
||||
// // fields..
|
||||
// }
|
||||
//
|
||||
// struct parent {
|
||||
// c child
|
||||
// }
|
||||
//
|
||||
// var p parent
|
||||
// record(&p.c)
|
||||
//
|
||||
// Here, &p and &p.c occupy the exact same address range.
|
||||
//
|
||||
// Or like this:
|
||||
//
|
||||
// struct child {
|
||||
// // fields
|
||||
// }
|
||||
//
|
||||
// var arr [1]parent
|
||||
// record(&arr[0])
|
||||
//
|
||||
// Similarly, &arr[0] and &arr[0].c have the exact same address range.
|
||||
//
|
||||
// Precondition: parent and child must occupy the same memory.
|
||||
func isSameSizeParent(parent reflect.Value, childType reflect.Type) bool {
|
||||
switch parent.Kind() {
|
||||
case reflect.Struct:
|
||||
for i := 0; i < parent.NumField(); i++ {
|
||||
field := parent.Field(i)
|
||||
if field.Type() == childType {
|
||||
return true
|
||||
}
|
||||
// Recurse through any intermediate types.
|
||||
if isSameSizeParent(field, childType) {
|
||||
return true
|
||||
}
|
||||
// Does it make sense to keep going if the first field
|
||||
// doesn't match? Yes, because there might be an
|
||||
// arbitrary number of zero-sized fields before we get
|
||||
// a match, and childType itself can be zero-sized.
|
||||
}
|
||||
return false
|
||||
case reflect.Array:
|
||||
// The only case where an array with more than one element can
|
||||
// return true is if childType is zero-sized. In such cases,
|
||||
// it's ambiguous which element contains the match since a
|
||||
// zero-sized child object fully fits in any of the zero-sized
|
||||
// elements in an array... However since all elements are of
|
||||
// the same type, we only need to check one element.
|
||||
//
|
||||
// For non-zero-sized childTypes, parent.Len() must be 1, but a
|
||||
// combination of the precondition and an implicit comparison
|
||||
// between the array element size and childType ensures this.
|
||||
return parent.Len() > 0 && isSameSizeParent(parent.Index(0), childType)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// nextID returns the next valid ID.
|
||||
func (es *encodeState) nextID() objectID {
|
||||
es.lastID++
|
||||
return objectID(es.lastID)
|
||||
}
|
||||
|
||||
// dummyAddr points to the dummy zero-sized address.
|
||||
var dummyAddr = reflect.ValueOf(new(struct{})).Pointer()
|
||||
|
||||
// resolve records the address range occupied by an object.
|
||||
func (es *encodeState) resolve(obj reflect.Value, ref *wire.Ref) {
|
||||
addr := obj.Pointer()
|
||||
|
||||
// Is this a map pointer? Just record the single address. It is not
|
||||
// possible to take any pointers into the map internals.
|
||||
if obj.Kind() == reflect.Map {
|
||||
if addr == 0 {
|
||||
// Just leave the nil reference alone. This is fine, we
|
||||
// may need to encode as a reference in this way. We
|
||||
// return nil for our objectEncodeState so that anyone
|
||||
// depending on this value knows there's nothing there.
|
||||
return
|
||||
}
|
||||
seg, gap := es.values.Find(addr)
|
||||
if seg.Ok() {
|
||||
// Ensure the map types match.
|
||||
existing := seg.Value()
|
||||
if existing.obj.Type() != obj.Type() {
|
||||
Failf("overlapping map objects at 0x%x: [new object] %#v [existing object type] %s", addr, obj, existing.obj)
|
||||
}
|
||||
|
||||
// No sense recording refs, maps may not be replaced by
|
||||
// covering objects, they are maximal.
|
||||
ref.Root = wire.Uint(existing.id)
|
||||
return
|
||||
}
|
||||
|
||||
// Record the map.
|
||||
r := addrRange{addr, addr + 1}
|
||||
oes := &objectEncodeState{
|
||||
id: es.nextID(),
|
||||
obj: obj,
|
||||
how: encodeMapAsValue,
|
||||
}
|
||||
// Use Insert instead of InsertWithoutMergingUnchecked when race
|
||||
// detection is enabled to get additional sanity-checking from Merge.
|
||||
if !raceEnabled {
|
||||
es.values.InsertWithoutMergingUnchecked(gap, r, oes)
|
||||
} else {
|
||||
es.values.Insert(gap, r, oes)
|
||||
}
|
||||
es.pending[oes.id] = oes
|
||||
es.deferred.PushBack(oes)
|
||||
|
||||
// See above: no ref recording.
|
||||
ref.Root = wire.Uint(oes.id)
|
||||
return
|
||||
}
|
||||
|
||||
// If not a map, then the object must be a pointer.
|
||||
if obj.Kind() != reflect.Ptr {
|
||||
Failf("attempt to record non-map and non-pointer object %#v", obj)
|
||||
}
|
||||
|
||||
obj = obj.Elem() // Value from here.
|
||||
|
||||
// Is this a zero-sized type?
|
||||
typ := obj.Type()
|
||||
size := typ.Size()
|
||||
if size == 0 {
|
||||
if addr == dummyAddr {
|
||||
// Zero-sized objects point to a dummy byte within the
|
||||
// runtime. There's no sense recording this in the
|
||||
// address map. We add this to the dedicated
|
||||
// zeroValues.
|
||||
//
|
||||
// Note that zero-sized objects must be *true*
|
||||
// zero-sized objects. They cannot be part of some
|
||||
// larger object. In that case, they are assigned a
|
||||
// 1-byte address at the end of the object.
|
||||
oes, ok := es.zeroValues[typ]
|
||||
if !ok {
|
||||
oes = &objectEncodeState{
|
||||
id: es.nextID(),
|
||||
obj: obj,
|
||||
}
|
||||
es.zeroValues[typ] = oes
|
||||
es.pending[oes.id] = oes
|
||||
es.deferred.PushBack(oes)
|
||||
}
|
||||
|
||||
// There's also no sense tracking back references. We
|
||||
// know that this is a true zero-sized object, and not
|
||||
// part of a larger container, so it will not change.
|
||||
ref.Root = wire.Uint(oes.id)
|
||||
return
|
||||
}
|
||||
size = 1 // See above.
|
||||
}
|
||||
|
||||
end := addr + size
|
||||
r := addrRange{addr, end}
|
||||
seg := es.values.LowerBoundSegment(addr)
|
||||
var (
|
||||
oes *objectEncodeState
|
||||
gap addrGapIterator
|
||||
)
|
||||
|
||||
// Does at least one previously-registered object overlap this one?
|
||||
if seg.Ok() && seg.Start() < end {
|
||||
existing := seg.Value()
|
||||
|
||||
if seg.Range() == r && typ == existing.obj.Type() {
|
||||
// This exact object is already registered. Avoid the traversal and
|
||||
// just return directly. We don't need to encode the type
|
||||
// information or any dots here.
|
||||
ref.Root = wire.Uint(existing.id)
|
||||
existing.refs = append(existing.refs, ref)
|
||||
return
|
||||
}
|
||||
|
||||
if seg.Range().IsSupersetOf(r) && (seg.Range() != r || isSameSizeParent(existing.obj, typ)) {
|
||||
// This object is contained within a previously-registered object.
|
||||
// Perform traversal from the container to the new object.
|
||||
ref.Root = wire.Uint(existing.id)
|
||||
ref.Dots = traverse(existing.obj.Type(), typ, seg.Start(), addr)
|
||||
ref.Type = es.findType(existing.obj.Type())
|
||||
existing.refs = append(existing.refs, ref)
|
||||
return
|
||||
}
|
||||
|
||||
// This object contains one or more previously-registered objects.
|
||||
// Remove them and update existing references to use the new one.
|
||||
oes := &objectEncodeState{
|
||||
// Reuse the root ID of the first contained element.
|
||||
id: existing.id,
|
||||
obj: obj,
|
||||
}
|
||||
type elementEncodeState struct {
|
||||
addr uintptr
|
||||
typ reflect.Type
|
||||
refs []*wire.Ref
|
||||
}
|
||||
var (
|
||||
elems []elementEncodeState
|
||||
gap addrGapIterator
|
||||
)
|
||||
for {
|
||||
// Each contained object should be completely contained within
|
||||
// this one.
|
||||
if raceEnabled && !r.IsSupersetOf(seg.Range()) {
|
||||
Failf("containing object %#v does not contain existing object %#v", obj, existing.obj)
|
||||
}
|
||||
elems = append(elems, elementEncodeState{
|
||||
addr: seg.Start(),
|
||||
typ: existing.obj.Type(),
|
||||
refs: existing.refs,
|
||||
})
|
||||
delete(es.pending, existing.id)
|
||||
es.deferred.Remove(existing)
|
||||
gap = es.values.Remove(seg)
|
||||
seg = gap.NextSegment()
|
||||
if !seg.Ok() || seg.Start() >= end {
|
||||
break
|
||||
}
|
||||
existing = seg.Value()
|
||||
}
|
||||
wt := es.findType(typ)
|
||||
for _, elem := range elems {
|
||||
dots := traverse(typ, elem.typ, addr, elem.addr)
|
||||
for _, ref := range elem.refs {
|
||||
ref.Root = wire.Uint(oes.id)
|
||||
ref.Dots = append(ref.Dots, dots...)
|
||||
ref.Type = wt
|
||||
}
|
||||
oes.refs = append(oes.refs, elem.refs...)
|
||||
}
|
||||
// Finally register the new containing object.
|
||||
if !raceEnabled {
|
||||
es.values.InsertWithoutMergingUnchecked(gap, r, oes)
|
||||
} else {
|
||||
es.values.Insert(gap, r, oes)
|
||||
}
|
||||
es.pending[oes.id] = oes
|
||||
es.deferred.PushBack(oes)
|
||||
ref.Root = wire.Uint(oes.id)
|
||||
oes.refs = append(oes.refs, ref)
|
||||
return
|
||||
}
|
||||
|
||||
// No existing object overlaps this one. Register a new object.
|
||||
oes = &objectEncodeState{
|
||||
id: es.nextID(),
|
||||
obj: obj,
|
||||
}
|
||||
if seg.Ok() {
|
||||
gap = seg.PrevGap()
|
||||
} else {
|
||||
gap = es.values.LastGap()
|
||||
}
|
||||
if !raceEnabled {
|
||||
es.values.InsertWithoutMergingUnchecked(gap, r, oes)
|
||||
} else {
|
||||
es.values.Insert(gap, r, oes)
|
||||
}
|
||||
es.pending[oes.id] = oes
|
||||
es.deferred.PushBack(oes)
|
||||
ref.Root = wire.Uint(oes.id)
|
||||
oes.refs = append(oes.refs, ref)
|
||||
}
|
||||
|
||||
// traverse searches for a target object within a root object, where the target
|
||||
// object is a struct field or array element within root, with potentially
|
||||
// multiple intervening types. traverse returns the set of field or element
|
||||
// traversals required to reach the target.
|
||||
//
|
||||
// Note that for efficiency, traverse returns the dots in the reverse order.
|
||||
// That is, the first traversal required will be the last element of the list.
|
||||
//
|
||||
// Precondition: The target object must lie completely within the range defined
|
||||
// by [rootAddr, rootAddr + sizeof(rootType)].
|
||||
func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) []wire.Dot {
|
||||
// Recursion base case: the types actually match.
|
||||
if targetType == rootType && targetAddr == rootAddr {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch rootType.Kind() {
|
||||
case reflect.Struct:
|
||||
offset := targetAddr - rootAddr
|
||||
for i := rootType.NumField(); i > 0; i-- {
|
||||
field := rootType.Field(i - 1)
|
||||
// The first field from the end with an offset that is
|
||||
// smaller than or equal to our address offset is where
|
||||
// the target is located. Traverse from there.
|
||||
if field.Offset <= offset {
|
||||
dots := traverse(field.Type, targetType, rootAddr+field.Offset, targetAddr)
|
||||
fieldName := wire.FieldName(field.Name)
|
||||
return append(dots, &fieldName)
|
||||
}
|
||||
}
|
||||
// Should never happen; the target should be reachable.
|
||||
Failf("no field in root type %v contains target type %v", rootType, targetType)
|
||||
|
||||
case reflect.Array:
|
||||
// Since arrays have homogeneous types, all elements have the
|
||||
// same size and we can compute where the target lives. This
|
||||
// does not matter for the purpose of typing, but matters for
|
||||
// the purpose of computing the address of the given index.
|
||||
elemSize := int(rootType.Elem().Size())
|
||||
n := int(targetAddr-rootAddr) / elemSize // Relies on integer division rounding down.
|
||||
if rootType.Len() < n {
|
||||
Failf("traversal target of type %v @%x is beyond the end of the array type %v @%x with %v elements",
|
||||
targetType, targetAddr, rootType, rootAddr, rootType.Len())
|
||||
}
|
||||
dots := traverse(rootType.Elem(), targetType, rootAddr+uintptr(n*elemSize), targetAddr)
|
||||
return append(dots, wire.Index(n))
|
||||
|
||||
default:
|
||||
// For any other type, there's no possibility of aliasing so if
|
||||
// the types didn't match earlier then we have an address
|
||||
// collision which shouldn't be possible at this point.
|
||||
Failf("traverse failed for root type %v and target type %v", rootType, targetType)
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
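// Illustrative sketch (assumed types, not part of the original file): for
//
//	type pair struct{ a, b uint32 }
//	var arr [4]pair
//
// with base == the address of arr, traverse(reflect.TypeOf(arr),
// reflect.TypeOf(uint32(0)), base, base+12) selects element 12/8 == 1
// (arr[1]), then field "b" at offset 4 within it, and returns the dots in
// reverse order: [FieldName("b"), Index(1)].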
|
||||
// encodeMap encodes a map.
|
||||
func (es *encodeState) encodeMap(obj reflect.Value, dest *wire.Object) {
|
||||
if obj.IsNil() {
|
||||
// Because there is a difference between a nil map and an empty
|
||||
// map, we need to not decode in the case of a truly nil map.
|
||||
*dest = wire.Nil{}
|
||||
return
|
||||
}
|
||||
l := obj.Len()
|
||||
m := &wire.Map{
|
||||
Keys: make([]wire.Object, l),
|
||||
Values: make([]wire.Object, l),
|
||||
}
|
||||
*dest = m
|
||||
for i, k := range obj.MapKeys() {
|
||||
v := obj.MapIndex(k)
|
||||
// Map keys must be encoded using the full value because the
|
||||
// type will be omitted after the first key.
|
||||
es.encodeObject(k, encodeAsValue, &m.Keys[i])
|
||||
es.encodeObject(v, encodeAsValue, &m.Values[i])
|
||||
}
|
||||
}
|
||||
|
||||
// objectEncoder is for encoding structs.
|
||||
type objectEncoder struct {
|
||||
// es is the encodeState.
|
||||
es *encodeState
|
||||
|
||||
// encoded is the encoded struct.
|
||||
encoded *wire.Struct
|
||||
}
|
||||
|
||||
// save is called by the public methods on Sink.
|
||||
func (oe *objectEncoder) save(slot int, obj reflect.Value) {
|
||||
fieldValue := oe.encoded.Field(slot)
|
||||
oe.es.encodeObject(obj, encodeDefault, fieldValue)
|
||||
}
|
||||
|
||||
// encodeStruct encodes a composite object.
|
||||
func (es *encodeState) encodeStruct(obj reflect.Value, dest *wire.Object) {
|
||||
if s, ok := es.encodedStructs[obj]; ok {
|
||||
*dest = s
|
||||
return
|
||||
}
|
||||
s := &wire.Struct{}
|
||||
*dest = s
|
||||
es.encodedStructs[obj] = s
|
||||
|
||||
// Ensure that the obj is addressable. There are two cases when it is
// not: first, when this is dispatched via SaveValue; second, when this
// is a struct used as a map key. Either way, we need to make a copy to
// obtain an addressable value.
|
||||
if !obj.CanAddr() {
|
||||
localObj := reflect.New(obj.Type())
|
||||
localObj.Elem().Set(obj)
|
||||
obj = localObj.Elem()
|
||||
}
|
||||
|
||||
// Look the type up in the database.
|
||||
te, ok := es.types.Lookup(obj.Type())
|
||||
if te == nil {
|
||||
if obj.NumField() == 0 {
|
||||
// Allow unregistered anonymous, empty structs. This
|
||||
// will just return success without ever invoking the
|
||||
// passed function. This uses the immutable EmptyStruct
|
||||
// variable to prevent an allocation in this case.
|
||||
//
|
||||
// Note that this mechanism does *not* work for
|
||||
// interfaces in general. So you can't dispatch
|
||||
// non-registered empty structs via interfaces because
|
||||
// then they can't be restored.
|
||||
s.Alloc(0)
|
||||
return
|
||||
}
|
||||
// We need a SaverLoader for struct types.
|
||||
Failf("struct %T does not implement SaverLoader", obj.Interface())
|
||||
}
|
||||
if !ok {
|
||||
// Queue the type to be serialized.
|
||||
es.pendingTypes = append(es.pendingTypes, te.Type)
|
||||
}
|
||||
|
||||
// Invoke the provided saver.
|
||||
s.TypeID = wire.TypeID(te.ID)
|
||||
s.Alloc(len(te.Fields))
|
||||
oe := objectEncoder{
|
||||
es: es,
|
||||
encoded: s,
|
||||
}
|
||||
es.stats.start(te.ID)
|
||||
defer es.stats.done()
|
||||
if sl, ok := obj.Addr().Interface().(SaverLoader); ok {
|
||||
// Note: may be a registered empty struct which does not
|
||||
// implement the saver/loader interfaces.
|
||||
sl.StateSave(Sink{internal: oe})
|
||||
}
|
||||
}
|
||||
|
||||
// encodeArray encodes an array.
|
||||
func (es *encodeState) encodeArray(obj reflect.Value, dest *wire.Object) {
|
||||
l := obj.Len()
|
||||
a := &wire.Array{
|
||||
Contents: make([]wire.Object, l),
|
||||
}
|
||||
*dest = a
|
||||
for i := 0; i < l; i++ {
|
||||
// We need to encode the full value because arrays are encoded
|
||||
// using the type information from only the first element.
|
||||
es.encodeObject(obj.Index(i), encodeAsValue, &a.Contents[i])
|
||||
}
|
||||
}
|
||||
|
||||
// findType recursively finds type information.
|
||||
func (es *encodeState) findType(typ reflect.Type) wire.TypeSpec {
|
||||
// First: check if this is a proper type. It's possible for pointers,
|
||||
// slices, arrays, maps, etc to all have some different type.
|
||||
te, ok := es.types.Lookup(typ)
|
||||
if te != nil {
|
||||
if !ok {
|
||||
// See encodeStruct.
|
||||
es.pendingTypes = append(es.pendingTypes, te.Type)
|
||||
}
|
||||
return wire.TypeID(te.ID)
|
||||
}
|
||||
|
||||
switch typ.Kind() {
|
||||
case reflect.Ptr:
|
||||
return &wire.TypeSpecPointer{
|
||||
Type: es.findType(typ.Elem()),
|
||||
}
|
||||
case reflect.Slice:
|
||||
return &wire.TypeSpecSlice{
|
||||
Type: es.findType(typ.Elem()),
|
||||
}
|
||||
case reflect.Array:
|
||||
return &wire.TypeSpecArray{
|
||||
Count: wire.Uint(typ.Len()),
|
||||
Type: es.findType(typ.Elem()),
|
||||
}
|
||||
case reflect.Map:
|
||||
return &wire.TypeSpecMap{
|
||||
Key: es.findType(typ.Key()),
|
||||
Value: es.findType(typ.Elem()),
|
||||
}
|
||||
default:
|
||||
// After potentially chasing many pointers, the
|
||||
// ultimate type of the object is not known.
|
||||
Failf("type %q is not known", typ)
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
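// Illustrative note (not part of the original file): the recursion above
// bottoms out at registered types. Assuming registered struct types Key and
// Foo, a value of type map[Key][]*Foo would produce (roughly):
//
//	&wire.TypeSpecMap{
//		Key: wire.TypeID(<ID of Key>),
//		Value: &wire.TypeSpecSlice{
//			Type: &wire.TypeSpecPointer{Type: wire.TypeID(<ID of Foo>)},
//		},
//	}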
|
||||
// encodeInterface encodes an interface.
|
||||
func (es *encodeState) encodeInterface(obj reflect.Value, dest *wire.Object) {
|
||||
// Dereference the object.
|
||||
obj = obj.Elem()
|
||||
if !obj.IsValid() {
|
||||
// Special case: the nil object.
|
||||
*dest = &wire.Interface{
|
||||
Type: wire.TypeSpecNil{},
|
||||
Value: wire.Nil{},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Encode underlying object.
|
||||
i := &wire.Interface{
|
||||
Type: es.findType(obj.Type()),
|
||||
}
|
||||
*dest = i
|
||||
es.encodeObject(obj, encodeAsValue, &i.Value)
|
||||
}
|
||||
|
||||
// isPrimitiveZero returns true if this is a primitive object, or a composite
|
||||
// object composed entirely of primitives.
|
||||
func isPrimitiveZero(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Pointers are always treated as primitive types because we
|
||||
// won't encode directly from here. Returning true here won't
|
||||
// prevent the object from being encoded correctly.
|
||||
return true
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Slice:
|
||||
// The slice itself is a primitive, but not necessarily the array
// that it points to. This is similar to a pointer.
|
||||
return true
|
||||
case reflect.Array:
|
||||
// We cannot treat an array as a primitive, because it may be
|
||||
// composed of structures or other things with side-effects.
|
||||
return isPrimitiveZero(typ.Elem())
|
||||
case reflect.Interface:
|
||||
// Since we know that this type is the zero type, the interface
|
||||
// value must be zero. Therefore this is primitive.
|
||||
return true
|
||||
case reflect.Struct:
|
||||
return false
|
||||
case reflect.Map:
|
||||
// The isPrimitiveZero function is called only on zero-types to
|
||||
// see if it's safe to serialize. Since a zero map has no
|
||||
// elements, it is safe to treat as a primitive.
|
||||
return true
|
||||
default:
|
||||
Failf("unknown type %q", typ.Name())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// encodeStrategy is the strategy used for encodeObject.
|
||||
type encodeStrategy int
|
||||
|
||||
const (
|
||||
// encodeDefault means types are encoded normally as references.
|
||||
encodeDefault encodeStrategy = iota
|
||||
|
||||
// encodeAsValue means that types will never be short-circuited and
|
||||
// will always be encoded as a normal value.
|
||||
encodeAsValue
|
||||
|
||||
// encodeMapAsValue means that even maps will be fully encoded.
|
||||
encodeMapAsValue
|
||||
)
|
||||
|
||||
// encodeObject encodes an object.
|
||||
func (es *encodeState) encodeObject(obj reflect.Value, how encodeStrategy, dest *wire.Object) {
|
||||
if how == encodeDefault && isPrimitiveZero(obj.Type()) && obj.IsZero() {
|
||||
*dest = wire.Nil{}
|
||||
return
|
||||
}
|
||||
switch obj.Kind() {
|
||||
case reflect.Ptr: // Fast path: first.
|
||||
r := new(wire.Ref)
|
||||
*dest = r
|
||||
if obj.IsNil() {
|
||||
// May be in an array or elsewhere such that a value is
|
||||
// required. So we encode as a reference to the zero
|
||||
// object, which does not exist. Note that this has to
|
||||
// be handled correctly in the decode path as well.
|
||||
return
|
||||
}
|
||||
es.resolve(obj, r)
|
||||
case reflect.Bool:
|
||||
*dest = wire.Bool(obj.Bool())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
*dest = wire.Int(obj.Int())
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
*dest = wire.Uint(obj.Uint())
|
||||
case reflect.Float32:
|
||||
*dest = wire.Float32(obj.Float())
|
||||
case reflect.Float64:
|
||||
*dest = wire.Float64(obj.Float())
|
||||
case reflect.Complex64:
|
||||
c := wire.Complex64(obj.Complex())
|
||||
*dest = &c // Needs alloc.
|
||||
case reflect.Complex128:
|
||||
c := wire.Complex128(obj.Complex())
|
||||
*dest = &c // Needs alloc.
|
||||
case reflect.String:
|
||||
s := wire.String(obj.String())
|
||||
*dest = &s // Needs alloc.
|
||||
case reflect.Array:
|
||||
es.encodeArray(obj, dest)
|
||||
case reflect.Slice:
|
||||
s := &wire.Slice{
|
||||
Capacity: wire.Uint(obj.Cap()),
|
||||
Length: wire.Uint(obj.Len()),
|
||||
}
|
||||
*dest = s
|
||||
// Note that we do need to provide a wire.Slice type here as
|
||||
// how is not encodeDefault. If this were the case, then it
|
||||
// would have been caught by the IsZero check above and we
|
||||
// would have just used wire.Nil{}.
|
||||
if obj.IsNil() {
|
||||
return
|
||||
}
|
||||
// Slices need pointer resolution.
|
||||
es.resolve(arrayFromSlice(obj), &s.Ref)
|
||||
case reflect.Interface:
|
||||
es.encodeInterface(obj, dest)
|
||||
case reflect.Struct:
|
||||
es.encodeStruct(obj, dest)
|
||||
case reflect.Map:
|
||||
if how == encodeMapAsValue {
|
||||
es.encodeMap(obj, dest)
|
||||
return
|
||||
}
|
||||
r := new(wire.Ref)
|
||||
*dest = r
|
||||
es.resolve(obj, r)
|
||||
default:
|
||||
Failf("unknown object %#v", obj.Interface())
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// Save serializes the object graph rooted at obj.
|
||||
func (es *encodeState) Save(obj reflect.Value) {
|
||||
es.stats.init()
|
||||
defer es.stats.fini(func(id typeID) string {
|
||||
return es.pendingTypes[id-1].Name
|
||||
})
|
||||
|
||||
// Resolve the first object, which should queue a pile of additional
|
||||
// objects on the pending list. All queued objects should be fully
|
||||
// resolved, and we should be able to serialize after this call.
|
||||
var root wire.Ref
|
||||
es.resolve(obj.Addr(), &root)
|
||||
|
||||
// Encode the graph.
|
||||
var oes *objectEncodeState
|
||||
if err := safely(func() {
|
||||
for oes = es.deferred.Front(); oes != nil; oes = es.deferred.Front() {
|
||||
// Remove and encode the object. Note that as a result
|
||||
// of this encoding, the object may be enqueued on the
|
||||
// deferred list yet again. That's expected, and why it
|
||||
// is removed first.
|
||||
es.deferred.Remove(oes)
|
||||
es.encodeObject(oes.obj, oes.how, &oes.encoded)
|
||||
}
|
||||
}); err != nil {
|
||||
// Include the object in the error message.
|
||||
Failf("encoding error: %w\nfor object %#v", err, oes.obj.Interface())
|
||||
}
|
||||
|
||||
// Check that we have objects to serialize.
|
||||
if len(es.pending) == 0 {
|
||||
Failf("pending is empty?")
|
||||
}
|
||||
|
||||
// Write the header with the number of objects.
|
||||
if err := WriteHeader(es.w, uint64(len(es.pending)), true); err != nil {
|
||||
Failf("error writing header: %w", err)
|
||||
}
|
||||
|
||||
// Serialize all pending types and pending objects. Note that we don't
|
||||
// bother removing from this list as we walk it because that just
|
||||
// wastes time. It will not change after this point.
|
||||
if err := safely(func() {
|
||||
for _, wt := range es.pendingTypes {
|
||||
// Encode the type.
|
||||
wire.Save(es.w, &wt)
|
||||
}
|
||||
// Emit objects in ID order.
|
||||
ids := make([]objectID, 0, len(es.pending))
|
||||
for id := range es.pending {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Slice(ids, func(i, j int) bool {
|
||||
return ids[i] < ids[j]
|
||||
})
|
||||
for _, id := range ids {
|
||||
// Encode the id.
|
||||
wire.Save(es.w, wire.Uint(id))
|
||||
// Marshal the object.
|
||||
oes := es.pending[id]
|
||||
wire.Save(es.w, oes.encoded)
|
||||
}
|
||||
}); err != nil {
|
||||
// Include the object and the error.
|
||||
Failf("error serializing object %#v: %w", oes.encoded, err)
|
||||
}
|
||||
}
|
||||
|
||||
// objectFlag indicates that the length is a # of objects, rather than a raw
|
||||
// byte length. When this is set on a length header in the stream, it may be
|
||||
// decoded appropriately.
|
||||
const objectFlag uint64 = 1 << 63
|
||||
|
||||
// WriteHeader writes a header.
|
||||
//
|
||||
// Each object written to the statefile should be prefixed with a header. In
|
||||
// order to generate statefiles that play nicely with debugging tools, raw
|
||||
// writes should be prefixed with a header with object set to false and the
|
||||
// appropriate length. This will allow tools to skip these regions.
|
||||
func WriteHeader(w io.Writer, length uint64, object bool) error {
|
||||
// Sanity check the length.
|
||||
if length&objectFlag != 0 {
|
||||
Failf("impossibly huge length: %d", length)
|
||||
}
|
||||
if object {
|
||||
length |= objectFlag
|
||||
}
|
||||
|
||||
// Write a header.
|
||||
return safely(func() {
|
||||
wire.SaveUint(w, length)
|
||||
})
|
||||
}
|
||||
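// Illustrative round-trip sketch (not part of the original file): a header
// written with WriteHeader can be read back with ReadHeader, with the object
// flag transported in the top bit of the length:
//
//	var buf bytes.Buffer
//	if err := WriteHeader(&buf, 42, true); err != nil {
//		panic(err)
//	}
//	length, object, err := ReadHeader(&buf)
//	// length == 42, object == true, err == nil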
|
||||
// addrSetFunctions is used by addrSet.
|
||||
type addrSetFunctions struct{}
|
||||
|
||||
func (addrSetFunctions) MinKey() uintptr {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (addrSetFunctions) MaxKey() uintptr {
|
||||
return ^uintptr(0)
|
||||
}
|
||||
|
||||
func (addrSetFunctions) ClearValue(val **objectEncodeState) {
|
||||
*val = nil
|
||||
}
|
||||
|
||||
func (addrSetFunctions) Merge(r1 addrRange, val1 *objectEncodeState, r2 addrRange, val2 *objectEncodeState) (*objectEncodeState, bool) {
|
||||
if val1.obj == val2.obj {
|
||||
// This should never happen. It would indicate that the same
|
||||
// object exists in two non-contiguous address ranges. Note
|
||||
// that this assertion can only be triggered if the race
|
||||
// detector is enabled.
|
||||
Failf("unexpected merge in addrSet @ %v and %v: %#v and %#v", r1, r2, val1.obj, val2.obj)
|
||||
}
|
||||
// Reject the merge.
|
||||
return val1, false
|
||||
}
|
||||
|
||||
func (addrSetFunctions) Split(r addrRange, val *objectEncodeState, _ uintptr) (*objectEncodeState, *objectEncodeState) {
|
||||
// A split should never happen: we don't remove ranges.
|
||||
Failf("unexpected split in addrSet @ %v: %#v", r, val.obj)
|
||||
panic("unreachable")
|
||||
}
|
||||
32 vendor/gvisor.dev/gvisor/pkg/state/encode_unsafe.go vendored Normal file
@@ -0,0 +1,32 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package state

import (
	"reflect"
	"unsafe"
)

// arrayFromSlice constructs a new pointer to the slice data.
//
// It would be similar to the following:
//
//	x := make([]Foo, l, c)
//	a := (*[c]Foo)(unsafe.Pointer(&x[0]))
func arrayFromSlice(obj reflect.Value) reflect.Value {
	return reflect.NewAt(
		reflect.ArrayOf(obj.Cap(), obj.Type().Elem()),
		unsafe.Pointer(obj.Pointer()))
}
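
// Example (illustrative; not part of the vendored file): the returned
// reflect.Value is a pointer to an array aliasing the slice's backing store.
//
//	x := []int{1, 2, 3}
//	a := arrayFromSlice(reflect.ValueOf(x)) // Value of type *[3]int.
//	a.Elem().Index(0).SetInt(9)             // Writes through to x[0].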
324 vendor/gvisor.dev/gvisor/pkg/state/state.go vendored Normal file
@@ -0,0 +1,324 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package state provides functionality related to saving and loading object
// graphs. For most types, it provides a set of default saving / loading logic
// that will be invoked automatically if custom logic is not defined.
//
//	Kind           Support
//	----           -------
//	Bool           default
//	Int            default
//	Int8           default
//	Int16          default
//	Int32          default
//	Int64          default
//	Uint           default
//	Uint8          default
//	Uint16         default
//	Uint32         default
//	Uint64         default
//	Float32        default
//	Float64        default
//	Complex64      default
//	Complex128     default
//	Array          default
//	Chan           custom
//	Func           custom
//	Interface      default
//	Map            default
//	Ptr            default
//	Slice          default
//	String         default
//	Struct         custom (*) Unless zero-sized.
//	UnsafePointer  custom
//
// See README.md for an overview of how encoding and decoding works.
package state

import (
	"context"
	"fmt"
	"io"
	"reflect"
	"runtime"

	"gvisor.dev/gvisor/pkg/state/wire"
)

// objectID is a unique identifier assigned to each object to be serialized.
// Each instance of an object is considered separately, i.e. if there are two
// objects of the same type in the object graph being serialized, they'll be
// assigned unique objectIDs.
type objectID uint32

// typeID is the identifier for a type. Types are serialized and tracked
// alongside objects in order to avoid the overhead of encoding field names in
// all objects.
type typeID uint32

// ErrState is returned when an error is encountered during encode/decode.
type ErrState struct {
	// err is the underlying error.
	err error

	// trace is the stack trace.
	trace string
}

// Error returns a sensible description of the state error.
func (e *ErrState) Error() string {
	return fmt.Sprintf("%v:\n%s", e.err, e.trace)
}

// Unwrap implements standard unwrapping.
func (e *ErrState) Unwrap() error {
	return e.err
}

// Save saves the given object state.
func Save(ctx context.Context, w io.Writer, rootPtr any) (Stats, error) {
	// Create the encoding state.
	es := encodeState{
		ctx:            ctx,
		w:              w,
		types:          makeTypeEncodeDatabase(),
		zeroValues:     make(map[reflect.Type]*objectEncodeState),
		pending:        make(map[objectID]*objectEncodeState),
		encodedStructs: make(map[reflect.Value]*wire.Struct),
	}

	// Perform the encoding.
	err := safely(func() {
		es.Save(reflect.ValueOf(rootPtr).Elem())
	})
	return es.stats, err
}

// Load loads a checkpoint.
func Load(ctx context.Context, r io.Reader, rootPtr any) (Stats, error) {
	// Create the decoding state.
	ds := decodeState{
		ctx:      ctx,
		r:        r,
		types:    makeTypeDecodeDatabase(),
		deferred: make(map[objectID]wire.Object),
	}

	// Attempt our decode.
	err := safely(func() {
		ds.Load(reflect.ValueOf(rootPtr).Elem())
	})
	return ds.stats, err
}
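
// Example (illustrative; not part of the vendored file): a save/load round
// trip through an in-memory buffer. counter is a hypothetical registered
// type; see the registration sketch in types.go.
//
//	var buf bytes.Buffer
//	v := counter{Count: 42}
//	if _, err := Save(context.Background(), &buf, &v); err != nil {
//		panic(err)
//	}
//	var w counter
//	if _, err := Load(context.Background(), &buf, &w); err != nil {
//		panic(err)
//	}
//	// w.Count == 42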

// Sink is used for Type.StateSave.
type Sink struct {
	internal objectEncoder
}

// Save adds the given object to the map.
//
// You should always pass pointers to the object you are saving. For example:
//
//	type X struct {
//		A int
//		B *int
//	}
//
//	func (x *X) StateTypeName() string {
//		return "pkg.X"
//	}
//
//	func (x *X) StateFields() []string {
//		return []string{
//			"A",
//			"B",
//		}
//	}
//
//	func (x *X) StateSave(m Sink) {
//		m.Save(0, &x.A) // Field is A.
//		m.Save(1, &x.B) // Field is B.
//	}
//
//	func (x *X) StateLoad(ctx context.Context, m Source) {
//		m.Load(0, &x.A) // Field is A.
//		m.Load(1, &x.B) // Field is B.
//	}
func (s Sink) Save(slot int, objPtr any) {
	s.internal.save(slot, reflect.ValueOf(objPtr).Elem())
}

// SaveValue adds the given object value to the map.
//
// This should be used for values where pointers are not available, or casts
// are required during Save/Load.
//
// For example, if we want to cast external package type P.Foo to int64:
//
//	func (x *X) StateSave(m Sink) {
//		m.SaveValue(0, int64(x.A))
//	}
//
//	func (x *X) StateLoad(ctx context.Context, m Source) {
//		m.LoadValue(0, new(int64), func(y any) {
//			x.A = P.Foo(y.(int64))
//		})
//	}
func (s Sink) SaveValue(slot int, obj any) {
	s.internal.save(slot, reflect.ValueOf(obj))
}

// Context returns the context object provided at save time.
func (s Sink) Context() context.Context {
	return s.internal.es.ctx
}

// Type is an interface that must be implemented by Struct objects. This allows
// these objects to be serialized while minimizing runtime reflection required.
//
// All these methods can be automatically generated by the go_statify tool.
type Type interface {
	// StateTypeName returns the type's name.
	//
	// This is used for matching type information during encoding and
	// decoding, as well as dynamic interface dispatch. This should be
	// globally unique.
	StateTypeName() string

	// StateFields returns information about the type.
	//
	// Fields is the set of fields for the object. Calls to Sink.Save and
	// Source.Load must be made in-order with respect to these fields.
	//
	// This will be called at most once per serialization.
	StateFields() []string
}

// SaverLoader must be implemented by struct types.
type SaverLoader interface {
	// StateSave saves the state of the object to the given Sink.
	StateSave(Sink)

	// StateLoad loads the state of the object.
	StateLoad(context.Context, Source)
}

// Source is used for Type.StateLoad.
type Source struct {
	internal objectDecoder
}

// Load loads the given object passed as a pointer.
//
// See Sink.Save for an example.
func (s Source) Load(slot int, objPtr any) {
	s.internal.load(slot, reflect.ValueOf(objPtr), false, nil)
}

// LoadWait loads the given object from the map, and marks it as requiring all
// AfterLoad executions to complete prior to running this object's AfterLoad.
//
// See Sink.Save for an example.
func (s Source) LoadWait(slot int, objPtr any) {
	s.internal.load(slot, reflect.ValueOf(objPtr), true, nil)
}

// LoadValue loads the given object value from the map.
//
// See Sink.SaveValue for an example.
func (s Source) LoadValue(slot int, objPtr any, fn func(any)) {
	o := reflect.ValueOf(objPtr)
	s.internal.load(slot, o, true, func() { fn(o.Elem().Interface()) })
}

// AfterLoad schedules a function execution when all objects have been
// allocated and their automated loading and customized load logic have been
// executed. fn will not be executed until all of the current object's
// dependencies' AfterLoad() logic, if any, has been executed.
func (s Source) AfterLoad(fn func()) {
	s.internal.afterLoad(fn)
}

// Context returns the context object provided at load time.
func (s Source) Context() context.Context {
	return s.internal.ds.ctx
}

// IsZeroValue checks if the given value is the zero value.
//
// This function is used by the stateify tool.
func IsZeroValue(val any) bool {
	return val == nil || reflect.ValueOf(val).Elem().IsZero()
}

// Failf is a wrapper around panic that should be used to generate errors that
// can be caught during saving and loading.
func Failf(fmtStr string, v ...any) {
	panic(fmt.Errorf(fmtStr, v...))
}

// safely executes the given function, catching a panic and unpacking as an
// error.
//
// The error flow through the state package uses panic and recover. There are
// two important reasons for this:
//
// 1) Many of the reflection methods will already panic with invalid data or
// violated assumptions. We would want to recover anyways here.
//
// 2) It allows us to eliminate boilerplate within Save() and Load() functions.
// In nearly all cases, when the low-level serialization functions fail, you
// will want the checkpoint to fail anyways. Plumbing errors through every
// method doesn't add a lot of value. If there are specific error conditions
// that you'd like to handle, you should add appropriate functionality to
// objects themselves prior to calling Save() and Load().
func safely(fn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if es, ok := r.(*ErrState); ok {
				err = es // Propagate.
				return
			}

			// Build a new state error.
			es := new(ErrState)
			if e, ok := r.(error); ok {
				es.err = e
			} else {
				es.err = fmt.Errorf("%v", r)
			}

			// Make a stack. We don't know how big it will be ahead
			// of time, but want to make sure we get the whole
			// thing. So we just do a stupid brute force approach.
			var stack []byte
			for sz := 1024; ; sz *= 2 {
				stack = make([]byte, sz)
				n := runtime.Stack(stack, false)
				if n < sz {
					es.trace = string(stack[:n])
					break
				}
			}

			// Set the error.
			err = es
		}
	}()

	// Execute the function.
	fn()
	return nil
}
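
// Example (illustrative; not part of the vendored file): how a panic raised
// via Failf surfaces from safely as an *ErrState carrying a stack trace.
//
//	err := safely(func() {
//		Failf("bad value: %d", 42)
//	})
//	var es *ErrState
//	if errors.As(err, &es) {
//		fmt.Println(es.Error()) // "bad value: 42" followed by a trace.
//	}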
20 vendor/gvisor.dev/gvisor/pkg/state/state_norace.go vendored Normal file
@@ -0,0 +1,20 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !race
// +build !race

package state

var raceEnabled = false
20 vendor/gvisor.dev/gvisor/pkg/state/state_race.go vendored Normal file
@@ -0,0 +1,20 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build race
// +build race

package state

var raceEnabled = true
145 vendor/gvisor.dev/gvisor/pkg/state/stats.go vendored Normal file
@@ -0,0 +1,145 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package state

import (
	"bytes"
	"fmt"
	"sort"
	"time"
)

type statEntry struct {
	count uint
	total time.Duration
}

// Stats tracks encode / decode timing.
//
// This currently provides a meaningful String function and no other way to
// extract stats about individual types.
//
// All exported receivers accept nil.
type Stats struct {
	// byType contains a breakdown of time spent by type.
	//
	// This is indexed *directly* by typeID, including zero.
	byType []statEntry

	// stack contains objects in progress.
	stack []typeID

	// names contains type names.
	//
	// This is also indexed *directly* by typeID, including zero, which we
	// hard-code as "state.default". This is only resolved by calling fini
	// on the stats object.
	names []string

	// last is the last start time.
	last time.Time
}

// init initializes statistics.
func (s *Stats) init() {
	s.last = time.Now()
	s.stack = append(s.stack, 0)
}

// fini finalizes statistics.
func (s *Stats) fini(resolve func(id typeID) string) {
	s.done()

	// Resolve all type names.
	s.names = make([]string, len(s.byType))
	s.names[0] = "state.default" // See above.
	for id := typeID(1); int(id) < len(s.names); id++ {
		s.names[id] = resolve(id)
	}
}

// sample adds the samples to the given object.
func (s *Stats) sample(id typeID) {
	now := time.Now()
	if len(s.byType) <= int(id) {
		// Allocate all the missing entries in one fell swoop.
		s.byType = append(s.byType, make([]statEntry, 1+int(id)-len(s.byType))...)
	}
	s.byType[id].total += now.Sub(s.last)
	s.last = now
}

// start starts a sample.
func (s *Stats) start(id typeID) {
	last := s.stack[len(s.stack)-1]
	s.sample(last)
	s.stack = append(s.stack, id)
}

// done finishes the current sample.
func (s *Stats) done() {
	last := s.stack[len(s.stack)-1]
	s.sample(last)
	s.byType[last].count++
	s.stack = s.stack[:len(s.stack)-1]
}

type sliceEntry struct {
	name  string
	entry *statEntry
}

// String returns a table representation of the stats.
func (s *Stats) String() string {
	// Build a list of stat entries.
	ss := make([]sliceEntry, 0, len(s.byType))
	for id := 0; id < len(s.names); id++ {
		ss = append(ss, sliceEntry{
			name:  s.names[id],
			entry: &s.byType[id],
		})
	}

	// Sort by total time (descending).
	sort.Slice(ss, func(i, j int) bool {
		return ss[i].entry.total > ss[j].entry.total
	})

	// Print the stat results.
	var (
		buf   bytes.Buffer
		count uint
		total time.Duration
	)
	buf.WriteString("\n")
	buf.WriteString(fmt.Sprintf("% 16s | % 8s | % 16s | %s\n", "total", "count", "per", "type"))
	buf.WriteString("-----------------+----------+------------------+----------------\n")
	for _, se := range ss {
		if se.entry.count == 0 {
			// Since we store all types linearly, we are not
			// guaranteed that any entry actually has time.
			continue
		}
		count += se.entry.count
		total += se.entry.total
		per := se.entry.total / time.Duration(se.entry.count)
		buf.WriteString(fmt.Sprintf("% 16s | %8d | % 16s | %s\n",
			se.entry.total, se.entry.count, per, se.name))
	}
	buf.WriteString("-----------------+----------+------------------+----------------\n")
	buf.WriteString(fmt.Sprintf("% 16s | % 8d | % 16s | [all]",
		total, count, total/time.Duration(count)))
	return buf.String()
}
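
// Example (illustrative; not part of the vendored file): printing the
// per-type timing table after a successful save.
//
//	stats, err := Save(ctx, &buf, &obj)
//	if err == nil {
//		fmt.Println(stats.String())
//	}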
384 vendor/gvisor.dev/gvisor/pkg/state/types.go vendored Normal file
@@ -0,0 +1,384 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package state

import (
	"reflect"
	"sort"

	"gvisor.dev/gvisor/pkg/state/wire"
)

// assertValidType asserts that the type is valid.
func assertValidType(name string, fields []string) {
	if name == "" {
		Failf("type has empty name")
	}
	fieldsCopy := make([]string, len(fields))
	for i := 0; i < len(fields); i++ {
		if fields[i] == "" {
			Failf("field has empty name for type %q", name)
		}
		fieldsCopy[i] = fields[i]
	}
	sort.Slice(fieldsCopy, func(i, j int) bool {
		return fieldsCopy[i] < fieldsCopy[j]
	})
	for i := range fieldsCopy {
		if i > 0 && fieldsCopy[i-1] == fieldsCopy[i] {
			Failf("duplicate field %q for type %s", fieldsCopy[i], name)
		}
	}
}

// typeEntry is an entry in the typeDatabase.
type typeEntry struct {
	ID typeID
	wire.Type
}

// reconciledTypeEntry is a reconciled entry in the typeDatabase.
type reconciledTypeEntry struct {
	wire.Type
	LocalType  reflect.Type
	FieldOrder []int
}

// typeEncodeDatabase is an internal TypeInfo database for encoding.
type typeEncodeDatabase struct {
	// byType maps by type to the typeEntry.
	byType map[reflect.Type]*typeEntry

	// lastID is the last used ID.
	lastID typeID
}

// makeTypeEncodeDatabase makes a typeDatabase.
func makeTypeEncodeDatabase() typeEncodeDatabase {
	return typeEncodeDatabase{
		byType: make(map[reflect.Type]*typeEntry),
	}
}

// typeDecodeDatabase is an internal TypeInfo database for decoding.
type typeDecodeDatabase struct {
	// byID maps by ID to type.
	byID []*reconciledTypeEntry

	// pending are entries that are pending validation by Lookup. These
	// will be reconciled with actual objects. Note that these will also be
	// used to lookup types by name, since they may not be reconciled and
	// there's little value to deleting from this map.
	pending []*wire.Type
}

// makeTypeDecodeDatabase makes a typeDatabase.
func makeTypeDecodeDatabase() typeDecodeDatabase {
	return typeDecodeDatabase{}
}

// lookupNameFields extracts the name and fields from an object.
func lookupNameFields(typ reflect.Type) (string, []string, bool) {
	v := reflect.Zero(reflect.PtrTo(typ)).Interface()
	t, ok := v.(Type)
	if !ok {
		// Is this a primitive?
		if typ.Kind() == reflect.Interface {
			return interfaceType, nil, true
		}
		name := typ.Name()
		if _, ok := primitiveTypeDatabase[name]; !ok {
			// This is not a known type, and not a primitive. The
			// encoder may proceed for anonymous empty structs, or
			// it may dereference the type pointer and try again.
			return "", nil, false
		}
		return name, nil, true
	}
	// Sanity check the type.
	if raceEnabled {
		if _, ok := reverseTypeDatabase[typ]; !ok {
			// The type was not registered? Must be an embedded
			// structure or something else.
			return "", nil, false
		}
	}
	// Extract the name from the object.
	name := t.StateTypeName()
	fields := t.StateFields()
	assertValidType(name, fields)
	return name, fields, true
}

// Lookup looks up or registers the given object.
//
// The bool indicates whether this is an existing entry: false means the entry
// did not exist, and true means the entry did exist. If this bool is false
// and the returned typeEntry is nil, then the object did not implement the
// Type interface.
func (tdb *typeEncodeDatabase) Lookup(typ reflect.Type) (*typeEntry, bool) {
	te, ok := tdb.byType[typ]
	if !ok {
		// Lookup the type information.
		name, fields, ok := lookupNameFields(typ)
		if !ok {
			// Empty structs may still be encoded, so let the
			// caller decide what to do from here.
			return nil, false
		}

		// Register the new type.
		tdb.lastID++
		te = &typeEntry{
			ID: tdb.lastID,
			Type: wire.Type{
				Name:   name,
				Fields: fields,
			},
		}

		// All done.
		tdb.byType[typ] = te
		return te, false
	}
	return te, true
}

// Register adds a typeID entry.
func (tbd *typeDecodeDatabase) Register(typ *wire.Type) {
	assertValidType(typ.Name, typ.Fields)
	tbd.pending = append(tbd.pending, typ)
}

// LookupName looks up the type name by ID.
func (tbd *typeDecodeDatabase) LookupName(id typeID) string {
	if len(tbd.pending) < int(id) {
		// This is likely an encoder error?
		Failf("type ID %d not available", id)
	}
	return tbd.pending[id-1].Name
}

// LookupType looks up the type by ID.
func (tbd *typeDecodeDatabase) LookupType(id typeID) reflect.Type {
	name := tbd.LookupName(id)
	typ, ok := globalTypeDatabase[name]
	if !ok {
		// If not available, see if it's primitive.
		typ, ok = primitiveTypeDatabase[name]
		if !ok && name == interfaceType {
			// Matches the built-in interface type.
			var i any
			return reflect.TypeOf(&i).Elem()
		}
		if !ok {
			// The type is perhaps not registered?
			Failf("type name %q is not available", name)
		}
		return typ // Primitive type.
	}
	return typ // Registered type.
}

// singleFieldOrder defines the field order for a single field.
var singleFieldOrder = []int{0}

// Lookup looks up or registers the given object.
//
// First, the typeID is searched to see if this has already been appropriately
// reconciled. If not, then a reconciliation will take place that may result
// in a field ordering. An object that does not support the Type interface
// cannot be reconciled, and reconciliation failures panic via Failf, so this
// method never returns nil.
func (tbd *typeDecodeDatabase) Lookup(id typeID, typ reflect.Type) *reconciledTypeEntry {
	if len(tbd.byID) > int(id) && tbd.byID[id-1] != nil {
		// Already reconciled.
		return tbd.byID[id-1]
	}
	// The ID has not been reconciled yet. That's fine. We need to make
	// sure it aligns with the current provided object.
	if len(tbd.pending) < int(id) {
		// This id was never registered. Probably an encoder error?
		Failf("typeDatabase does not contain id %d", id)
	}
	// Extract the pending info.
	pending := tbd.pending[id-1]
	// Grow the byID list.
	if len(tbd.byID) < int(id) {
		tbd.byID = append(tbd.byID, make([]*reconciledTypeEntry, int(id)-len(tbd.byID))...)
	}
	// Reconcile the type.
	name, fields, ok := lookupNameFields(typ)
	if !ok {
		// Empty structs are decoded only when the type is nil. Since
		// this isn't the case, we fail here.
		Failf("unsupported type %q during decode; can't reconcile", pending.Name)
	}
	if name != pending.Name {
		// Are these the same type? Print a helpful message as this may
		// actually happen in practice if types change.
		Failf("typeDatabase contains conflicting definitions for id %d: %s->%v (current) and %s->%v (existing)",
			id, name, fields, pending.Name, pending.Fields)
	}
	rte := &reconciledTypeEntry{
		Type: wire.Type{
			Name:   name,
			Fields: fields,
		},
		LocalType: typ,
	}
	// If there are zero or one fields, then we skip allocating the field
	// slice. There is special handling for decoding in this case. If the
	// field name does not match, it will be caught in the general purpose
	// code below.
	if len(fields) != len(pending.Fields) {
		Failf("type %q contains different fields: %v (decode) and %v (encode)",
			name, fields, pending.Fields)
	}
	if len(fields) == 0 {
		tbd.byID[id-1] = rte // Save.
		return rte
	}
	if len(fields) == 1 && fields[0] == pending.Fields[0] {
		tbd.byID[id-1] = rte // Save.
		rte.FieldOrder = singleFieldOrder
		return rte
	}
	// For each field in the current object's information, match it to a
	// field in the destination object. We know from the assertion above
	// and the validation performed on insertion into pending that neither
	// field list contains any duplicates.
	fieldOrder := make([]int, len(fields))
	for i, name := range fields {
		fieldOrder[i] = -1 // Sentinel.
		// Is it an exact match?
		if pending.Fields[i] == name {
			fieldOrder[i] = i
			continue
		}
		// Find the matching field.
		for j, otherName := range pending.Fields {
			if name == otherName {
				fieldOrder[i] = j
				break
			}
		}
		if fieldOrder[i] == -1 {
			// The type name matches but we are lacking some common fields.
			Failf("type %q has mismatched fields: %v (decode) and %v (encode)",
				name, fields, pending.Fields)
		}
	}
	// The type has been reconciled.
	rte.FieldOrder = fieldOrder
	tbd.byID[id-1] = rte
	return rte
}

// interfaceType defines all interfaces.
const interfaceType = "interface"

// primitiveTypeDatabase is a set of fixed types.
var primitiveTypeDatabase = func() map[string]reflect.Type {
	r := make(map[string]reflect.Type)
	for _, t := range []reflect.Type{
		reflect.TypeOf(false),
		reflect.TypeOf(int(0)),
		reflect.TypeOf(int8(0)),
		reflect.TypeOf(int16(0)),
		reflect.TypeOf(int32(0)),
		reflect.TypeOf(int64(0)),
		reflect.TypeOf(uint(0)),
		reflect.TypeOf(uintptr(0)),
		reflect.TypeOf(uint8(0)),
		reflect.TypeOf(uint16(0)),
		reflect.TypeOf(uint32(0)),
		reflect.TypeOf(uint64(0)),
		reflect.TypeOf(""),
		reflect.TypeOf(float32(0.0)),
		reflect.TypeOf(float64(0.0)),
		reflect.TypeOf(complex64(0.0)),
		reflect.TypeOf(complex128(0.0)),
	} {
		r[t.Name()] = t
	}
	return r
}()

// globalTypeDatabase is used for dispatching interfaces on decode.
var globalTypeDatabase = map[string]reflect.Type{}

// reverseTypeDatabase is a reverse mapping.
var reverseTypeDatabase = map[reflect.Type]string{}

// Release releases references to global type databases.
// Must only be called in contexts where they will definitely never be used,
// in order to save memory.
func Release() {
	globalTypeDatabase = nil
	reverseTypeDatabase = nil
}

// Register registers a type.
//
// This must be called on init and only done once.
func Register(t Type) {
	name := t.StateTypeName()
	typ := reflect.TypeOf(t)
	if raceEnabled {
		assertValidType(name, t.StateFields())
		// Register must always be called on pointers.
		if typ.Kind() != reflect.Ptr {
			Failf("Register must be called on pointers")
		}
	}
	typ = typ.Elem()
	if raceEnabled {
		if typ.Kind() == reflect.Struct {
			// All registered structs must implement SaverLoader. We
			// allow the registration of non-struct types with just
			// the Type interface, but we need to call
			// StateSave/StateLoad methods on aggregate types.
			if _, ok := t.(SaverLoader); !ok {
				Failf("struct %T does not implement SaverLoader", t)
			}
		} else {
			// Non-structs must not have any fields. We don't support
			// calling StateSave/StateLoad methods on any non-struct types.
			// If custom behavior is required, these types should be
			// wrapped in a structure of some kind.
			if fields := t.StateFields(); len(fields) != 0 {
				Failf("non-struct %T has non-zero fields %v", t, fields)
			}
			// We don't allow non-structs to implement StateSave/StateLoad
			// methods, because they won't be called and it's confusing.
			if _, ok := t.(SaverLoader); ok {
				Failf("non-struct %T implements SaverLoader", t)
			}
		}
		if _, ok := primitiveTypeDatabase[name]; ok {
			Failf("conflicting primitiveTypeDatabase entry for %T: used by primitive", t)
		}
		if _, ok := globalTypeDatabase[name]; ok {
			Failf("conflicting globalTypeDatabase entries for %T: name conflict", t)
		}
		if name == interfaceType {
			Failf("conflicting name for %T: matches interfaceType", t)
		}
		reverseTypeDatabase[typ] = name
	}
	globalTypeDatabase[name] = typ
}
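
// Example (illustrative; not part of the vendored file): a hand-written
// registration of a hypothetical counter type; the stateify tool normally
// generates this boilerplate.
//
//	type counter struct {
//		Count int
//	}
//
//	func (c *counter) StateTypeName() string { return "example.counter" }
//	func (c *counter) StateFields() []string { return []string{"Count"} }
//	func (c *counter) StateSave(m Sink)      { m.Save(0, &c.Count) }
//	func (c *counter) StateLoad(ctx context.Context, m Source) {
//		m.Load(0, &c.Count)
//	}
//
//	func init() {
//		Register((*counter)(nil))
//	}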
976 vendor/gvisor.dev/gvisor/pkg/state/wire/wire.go vendored Normal file
@@ -0,0 +1,976 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package wire contains a few basic types that can be composed to serialize
// graph information for the state package. This package defines the wire
// protocol.
//
// Note that these types are careful about how they implement the relevant
// interfaces (either value receiver or pointer receiver), so that native-sized
// types, such as integers and simple pointers, can fit inside the interface
// object.
//
// This package also uses panic as control flow, so callers should be careful
// to wrap calls in appropriate handlers.
//
// Testing for this package is driven by the state test package.
package wire

import (
	"fmt"
	"io"
	"math"

	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

var oneByteArrayPool = sync.Pool{
	New: func() any { return &[1]byte{} },
}

// readFull is a utility. The equivalent is not needed for Write, but the API
// contract dictates that it must always complete all bytes given or return an
// error.
func readFull(r io.Reader, p []byte) {
	for done := 0; done < len(p); {
		n, err := r.Read(p[done:])
		done += n
		if n == 0 && err != nil {
			panic(err)
		}
	}
}

// Object is a generic object.
type Object interface {
	// save saves the given object.
	//
	// Panic is used for error control flow.
	save(io.Writer)

	// load loads a new object of the given type.
	//
	// Panic is used for error control flow.
	load(io.Reader) Object
}

// Bool is a boolean.
type Bool bool

// loadBool loads an object of type Bool.
func loadBool(r io.Reader) Bool {
	b := loadUint(r)
	return Bool(b == 1)
}

// save implements Object.save.
func (b Bool) save(w io.Writer) {
	var v Uint
	if b {
		v = 1
	} else {
		v = 0
	}
	v.save(w)
}

// load implements Object.load.
func (Bool) load(r io.Reader) Object { return loadBool(r) }

// Int is a signed integer.
//
// This uses varint encoding.
type Int int64

// loadInt loads an object of type Int.
func loadInt(r io.Reader) Int {
	u := loadUint(r)
	x := Int(u >> 1)
	if u&1 != 0 {
		x = ^x
	}
	return x
}

// save implements Object.save.
func (i Int) save(w io.Writer) {
	u := Uint(i) << 1
	if i < 0 {
		u = ^u
	}
	u.save(w)
}

// load implements Object.load.
func (Int) load(r io.Reader) Object { return loadInt(r) }
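
// Example (illustrative; not part of the vendored file): the zig-zag
// mapping above keeps small negative values small before varint encoding:
//
//	 0 -> 0
//	-1 -> 1
//	 1 -> 2
//	-2 -> 3
//	 2 -> 4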
// Uint is an unsigned integer.
type Uint uint64

func readByte(r io.Reader) byte {
	p := oneByteArrayPool.Get().(*[1]byte)
	defer oneByteArrayPool.Put(p)
	n, err := r.Read(p[:])
	if n != 1 {
		panic(err)
	}
	return p[0]
}

// loadUint loads an object of type Uint.
func loadUint(r io.Reader) Uint {
	var (
		u Uint
		s uint
	)
	for i := 0; i <= 9; i++ {
		b := readByte(r)
		if b < 0x80 {
			if i == 9 && b > 1 {
				panic("overflow")
			}
			u |= Uint(b) << s
			return u
		}
		u |= Uint(b&0x7f) << s
		s += 7
	}
	panic("unreachable")
}

func writeByte(w io.Writer, b byte) {
	p := oneByteArrayPool.Get().(*[1]byte)
	defer oneByteArrayPool.Put(p)
	p[0] = b
	n, err := w.Write(p[:])
	if n != 1 {
		panic(err)
	}
}

// save implements Object.save.
func (u Uint) save(w io.Writer) {
	for u >= 0x80 {
		writeByte(w, byte(u)|0x80)
		u >>= 7
	}
	writeByte(w, byte(u))
}

// load implements Object.load.
func (Uint) load(r io.Reader) Object { return loadUint(r) }
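
// Example (illustrative; not part of the vendored file): the varint layout
// written above stores 7 bits per byte, least-significant group first, with
// 0x80 as the continuation bit.
//
//	var buf bytes.Buffer
//	SaveUint(&buf, 300) // 300 = 0b10_0101100
//	// buf now holds 0xAC, 0x02.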

// Float32 is a 32-bit floating point number.
type Float32 float32

// loadFloat32 loads an object of type Float32.
func loadFloat32(r io.Reader) Float32 {
	n := loadUint(r)
	return Float32(math.Float32frombits(uint32(n)))
}

// save implements Object.save.
func (f Float32) save(w io.Writer) {
	n := Uint(math.Float32bits(float32(f)))
	n.save(w)
}

// load implements Object.load.
func (Float32) load(r io.Reader) Object { return loadFloat32(r) }

// Float64 is a 64-bit floating point number.
type Float64 float64

// loadFloat64 loads an object of type Float64.
func loadFloat64(r io.Reader) Float64 {
	n := loadUint(r)
	return Float64(math.Float64frombits(uint64(n)))
}

// save implements Object.save.
func (f Float64) save(w io.Writer) {
	n := Uint(math.Float64bits(float64(f)))
	n.save(w)
}

// load implements Object.load.
func (Float64) load(r io.Reader) Object { return loadFloat64(r) }

// Complex64 is a 64-bit complex number.
type Complex64 complex128

// loadComplex64 loads an object of type Complex64.
func loadComplex64(r io.Reader) Complex64 {
	re := loadFloat32(r)
	im := loadFloat32(r)
	return Complex64(complex(float32(re), float32(im)))
}

// save implements Object.save.
func (c *Complex64) save(w io.Writer) {
	re := Float32(real(*c))
	im := Float32(imag(*c))
	re.save(w)
	im.save(w)
}

// load implements Object.load.
func (*Complex64) load(r io.Reader) Object {
	c := loadComplex64(r)
	return &c
}

// Complex128 is a 128-bit complex number.
type Complex128 complex128

// loadComplex128 loads an object of type Complex128.
func loadComplex128(r io.Reader) Complex128 {
	re := loadFloat64(r)
	im := loadFloat64(r)
	return Complex128(complex(float64(re), float64(im)))
}

// save implements Object.save.
func (c *Complex128) save(w io.Writer) {
	re := Float64(real(*c))
	im := Float64(imag(*c))
	re.save(w)
	im.save(w)
}

// load implements Object.load.
func (*Complex128) load(r io.Reader) Object {
	c := loadComplex128(r)
	return &c
}

// String is a string.
type String string

// loadString loads an object of type String.
func loadString(r io.Reader) String {
	l := loadUint(r)
	p := make([]byte, l)
	readFull(r, p)
	return String(gohacks.StringFromImmutableBytes(p))
}

// save implements Object.save.
func (s *String) save(w io.Writer) {
	l := Uint(len(*s))
	l.save(w)
	p := gohacks.ImmutableBytesFromString(string(*s))
	_, err := w.Write(p) // Must write all bytes.
	if err != nil {
		panic(err)
	}
}

// load implements Object.load.
func (*String) load(r io.Reader) Object {
	s := loadString(r)
	return &s
}

// Dot is a kind of reference: one of Index and FieldName.
type Dot interface {
	isDot()
}

// Index is a reference resolution.
type Index uint32

func (Index) isDot() {}

// FieldName is a reference resolution.
type FieldName string

func (*FieldName) isDot() {}

// Ref is a reference to an object.
type Ref struct {
	// Root is the root object.
	Root Uint

	// Dots is the set of traversals required from the Root object above.
	// Note that this will be stored in reverse order for efficiency.
	Dots []Dot

	// Type is the base type for the root object. This is non-nil iff Dots
	// is non-zero length (that is, this is a complex reference). This is
	// not *strictly* necessary, but can be used to simplify decoding.
	Type TypeSpec
}

// loadRef loads an object of type Ref (abstract).
func loadRef(r io.Reader) Ref {
	ref := Ref{
		Root: loadUint(r),
	}
	l := loadUint(r)
	ref.Dots = make([]Dot, l)
	for i := 0; i < int(l); i++ {
		// Disambiguate between an Index (non-negative) and a field
		// name (negative). This saves some space and avoids a
		// dedicated loadDot function. See Ref.save for the other side.
		d := loadInt(r)
		if d >= 0 {
			ref.Dots[i] = Index(d)
			continue
		}
		p := make([]byte, -d)
		readFull(r, p)
		fieldName := FieldName(gohacks.StringFromImmutableBytes(p))
		ref.Dots[i] = &fieldName
	}
	if l != 0 {
		// Only if dots is non-zero.
		ref.Type = loadTypeSpec(r)
	}
	return ref
}

// save implements Object.save.
func (r *Ref) save(w io.Writer) {
	r.Root.save(w)
	l := Uint(len(r.Dots))
	l.save(w)
	for _, d := range r.Dots {
		// See loadRef. We use non-negative numbers to encode Index
		// objects and negative numbers to encode field lengths.
		switch x := d.(type) {
		case Index:
			i := Int(x)
			i.save(w)
		case *FieldName:
			d := Int(-len(*x))
			d.save(w)
			p := gohacks.ImmutableBytesFromString(string(*x))
			if _, err := w.Write(p); err != nil {
				panic(err)
			}
		default:
			panic("unknown dot implementation")
		}
	}
	if l != 0 {
		// See above.
		saveTypeSpec(w, r.Type)
	}
}
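
// Example (illustrative; not part of the vendored file): the sign trick
// above on the wire. The Root and Type values here are arbitrary.
//
//	name := FieldName("foo")
//	r := Ref{
//		Root: 1,
//		Dots: []Dot{Index(3), &name},
//		Type: TypeSpecNil{},
//	}
//	// r.save writes Int(3) for the index, then Int(-3) followed by
//	// the bytes "foo" for the field name.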

// load implements Object.load.
func (*Ref) load(r io.Reader) Object {
	ref := loadRef(r)
	return &ref
}

// Nil is a primitive zero value of any type.
type Nil struct{}

// loadNil loads an object of type Nil.
func loadNil(r io.Reader) Nil {
	return Nil{}
}

// save implements Object.save.
func (Nil) save(w io.Writer) {}

// load implements Object.load.
func (Nil) load(r io.Reader) Object { return loadNil(r) }

// Slice is a slice value.
type Slice struct {
	Length   Uint
	Capacity Uint
	Ref      Ref
}

// loadSlice loads an object of type Slice.
func loadSlice(r io.Reader) Slice {
	return Slice{
		Length:   loadUint(r),
		Capacity: loadUint(r),
		Ref:      loadRef(r),
	}
}

// save implements Object.save.
func (s *Slice) save(w io.Writer) {
	s.Length.save(w)
	s.Capacity.save(w)
	s.Ref.save(w)
}

// load implements Object.load.
func (*Slice) load(r io.Reader) Object {
	s := loadSlice(r)
	return &s
}

// Array is an array value.
type Array struct {
	Contents []Object
}

// loadArray loads an object of type Array.
func loadArray(r io.Reader) Array {
	l := loadUint(r)
	if l == 0 {
		// Note that there isn't a single object available to encode
		// the type of, so we need this additional branch.
		return Array{}
	}
	// All the objects here have the same type, so use dynamic dispatch
	// only once. All other objects will automatically take the same type
	// as the first object.
	contents := make([]Object, l)
	v := Load(r)
	contents[0] = v
	for i := 1; i < int(l); i++ {
		contents[i] = v.load(r)
	}
	return Array{
		Contents: contents,
	}
}

// save implements Object.save.
func (a *Array) save(w io.Writer) {
	l := Uint(len(a.Contents))
	l.save(w)
	if l == 0 {
		// See loadArray.
		return
	}
	// See above.
	Save(w, a.Contents[0])
	for i := 1; i < int(l); i++ {
		a.Contents[i].save(w)
	}
}

// load implements Object.load.
func (*Array) load(r io.Reader) Object {
	a := loadArray(r)
	return &a
}

// Map is a map value.
type Map struct {
	Keys   []Object
	Values []Object
}

// loadMap loads an object of type Map.
func loadMap(r io.Reader) Map {
	l := loadUint(r)
	if l == 0 {
		// See loadArray.
		return Map{}
	}
	// See type dispatch notes in Array.
	keys := make([]Object, l)
	values := make([]Object, l)
	k := Load(r)
	v := Load(r)
	keys[0] = k
	values[0] = v
	for i := 1; i < int(l); i++ {
		keys[i] = k.load(r)
		values[i] = v.load(r)
	}
	return Map{
		Keys:   keys,
		Values: values,
	}
}

// save implements Object.save.
func (m *Map) save(w io.Writer) {
	l := Uint(len(m.Keys))
	if int(l) != len(m.Values) {
		panic(fmt.Sprintf("mismatched keys (%d) and values (%d)", len(m.Keys), len(m.Values)))
	}
	l.save(w)
	if l == 0 {
		// See loadArray.
		return
	}
	// See above.
	Save(w, m.Keys[0])
	Save(w, m.Values[0])
	for i := 1; i < int(l); i++ {
		m.Keys[i].save(w)
		m.Values[i].save(w)
	}
}

// load implements Object.load.
func (*Map) load(r io.Reader) Object {
	m := loadMap(r)
	return &m
}

// TypeSpec is a type dereference.
type TypeSpec interface {
	isTypeSpec()
}

// TypeID is a concrete type ID.
type TypeID Uint

func (TypeID) isTypeSpec() {}

// TypeSpecPointer is a pointer type.
type TypeSpecPointer struct {
	Type TypeSpec
}

func (*TypeSpecPointer) isTypeSpec() {}

// TypeSpecArray is an array type.
type TypeSpecArray struct {
	Count Uint
	Type  TypeSpec
}

func (*TypeSpecArray) isTypeSpec() {}

// TypeSpecSlice is a slice type.
type TypeSpecSlice struct {
	Type TypeSpec
}

func (*TypeSpecSlice) isTypeSpec() {}

// TypeSpecMap is a map type.
type TypeSpecMap struct {
	Key   TypeSpec
	Value TypeSpec
}

func (*TypeSpecMap) isTypeSpec() {}

// TypeSpecNil is an empty type.
type TypeSpecNil struct{}

func (TypeSpecNil) isTypeSpec() {}

// TypeSpec types.
//
// These use a distinct encoding on the wire, as they are used only in the
// interface object. They are decoded through the dedicated loadTypeSpec and
// saveTypeSpec functions.
const (
	typeSpecTypeID Uint = iota
	typeSpecPointer
	typeSpecArray
	typeSpecSlice
	typeSpecMap
	typeSpecNil
)

// loadTypeSpec loads TypeSpec values.
func loadTypeSpec(r io.Reader) TypeSpec {
	switch hdr := loadUint(r); hdr {
	case typeSpecTypeID:
		return TypeID(loadUint(r))
	case typeSpecPointer:
		return &TypeSpecPointer{
			Type: loadTypeSpec(r),
		}
	case typeSpecArray:
		return &TypeSpecArray{
			Count: loadUint(r),
			Type:  loadTypeSpec(r),
		}
	case typeSpecSlice:
		return &TypeSpecSlice{
			Type: loadTypeSpec(r),
		}
	case typeSpecMap:
		return &TypeSpecMap{
			Key:   loadTypeSpec(r),
			Value: loadTypeSpec(r),
		}
	case typeSpecNil:
		return TypeSpecNil{}
	default:
		// This is not a valid stream?
		panic(fmt.Errorf("unknown header: %d", hdr))
	}
}

// saveTypeSpec saves TypeSpec values.
func saveTypeSpec(w io.Writer, t TypeSpec) {
	switch x := t.(type) {
	case TypeID:
		typeSpecTypeID.save(w)
		Uint(x).save(w)
	case *TypeSpecPointer:
		typeSpecPointer.save(w)
		saveTypeSpec(w, x.Type)
	case *TypeSpecArray:
		typeSpecArray.save(w)
		x.Count.save(w)
		saveTypeSpec(w, x.Type)
	case *TypeSpecSlice:
		typeSpecSlice.save(w)
		saveTypeSpec(w, x.Type)
	case *TypeSpecMap:
		typeSpecMap.save(w)
		saveTypeSpec(w, x.Key)
		saveTypeSpec(w, x.Value)
	case TypeSpecNil:
		typeSpecNil.save(w)
	default:
		// This should not happen?
		panic(fmt.Errorf("unknown type %T", t))
	}
}
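
// Example (illustrative; not part of the vendored file): the recursion in
// saveTypeSpec lets compound types nest. A map[K]*V whose element types have
// the hypothetical IDs 1 and 2 would be described as:
//
//	spec := &TypeSpecMap{
//		Key:   TypeID(1),
//		Value: &TypeSpecPointer{Type: TypeID(2)},
//	}
//	saveTypeSpec(w, spec)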

// Interface is an interface value.
type Interface struct {
	Type  TypeSpec
	Value Object
}

// loadInterface loads an object of type Interface.
func loadInterface(r io.Reader) Interface {
	return Interface{
		Type:  loadTypeSpec(r),
		Value: Load(r),
	}
}

// save implements Object.save.
func (i *Interface) save(w io.Writer) {
	saveTypeSpec(w, i.Type)
	Save(w, i.Value)
}

// load implements Object.load.
func (*Interface) load(r io.Reader) Object {
	i := loadInterface(r)
	return &i
}

// Type is type information.
type Type struct {
	Name   string
	Fields []string
}

// loadType loads an object of type Type.
func loadType(r io.Reader) Type {
	name := string(loadString(r))
	l := loadUint(r)
	fields := make([]string, l)
	for i := 0; i < int(l); i++ {
		fields[i] = string(loadString(r))
	}
	return Type{
		Name:   name,
		Fields: fields,
	}
}

// save implements Object.save.
func (t *Type) save(w io.Writer) {
	s := String(t.Name)
	s.save(w)
	l := Uint(len(t.Fields))
	l.save(w)
	for i := 0; i < int(l); i++ {
		s := String(t.Fields[i])
		s.save(w)
	}
}

// load implements Object.load.
func (*Type) load(r io.Reader) Object {
	t := loadType(r)
	return &t
}

// multipleObjects is a special type for serializing multiple objects.
type multipleObjects []Object

// loadMultipleObjects loads a series of objects.
func loadMultipleObjects(r io.Reader) multipleObjects {
	l := loadUint(r)
	m := make(multipleObjects, l)
	for i := 0; i < int(l); i++ {
		m[i] = Load(r)
	}
	return m
}

// save implements Object.save.
func (m *multipleObjects) save(w io.Writer) {
	l := Uint(len(*m))
	l.save(w)
	for i := 0; i < int(l); i++ {
		Save(w, (*m)[i])
	}
}

// load implements Object.load.
func (*multipleObjects) load(r io.Reader) Object {
	m := loadMultipleObjects(r)
	return &m
}

// noObjects represents no objects.
type noObjects struct{}

// loadNoObjects loads a sentinel.
func loadNoObjects(r io.Reader) noObjects { return noObjects{} }

// save implements Object.save.
func (noObjects) save(w io.Writer) {}

// load implements Object.load.
func (noObjects) load(r io.Reader) Object { return loadNoObjects(r) }

// Struct is a basic composite value.
type Struct struct {
	TypeID TypeID
	fields Object // Optionally noObjects or *multipleObjects.
}

// Field returns a pointer to the given field slot.
//
// This must be called after Alloc.
func (s *Struct) Field(i int) *Object {
	if fields, ok := s.fields.(*multipleObjects); ok {
		return &((*fields)[i])
	}
	if _, ok := s.fields.(noObjects); ok {
		// Alloc may be optionally called; can't call twice.
		panic("Field called inappropriately, wrong Alloc?")
	}
	return &s.fields
}

// Alloc allocates the given number of fields.
//
// This must be called before Add and Save.
//
// Precondition: slots must be non-negative.
func (s *Struct) Alloc(slots int) {
	switch {
	case slots == 0:
		s.fields = noObjects{}
	case slots == 1:
		// Leave it alone.
	case slots > 1:
		fields := make(multipleObjects, slots)
		s.fields = &fields
	default:
		// Violates precondition.
		panic(fmt.Sprintf("Alloc called with negative slots %d?", slots))
	}
}

// Fields returns the number of fields.
func (s *Struct) Fields() int {
	switch x := s.fields.(type) {
	case *multipleObjects:
		return len(*x)
	case noObjects:
		return 0
	default:
		return 1
	}
}
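
// Example (illustrative; not part of the vendored file): populating a
// two-field Struct. The TypeID value is arbitrary.
//
//	var s Struct
//	s.TypeID = 7
//	s.Alloc(2)
//	*s.Field(0) = Uint(1)
//	*s.Field(1) = Nil{}
//	// s.Fields() == 2; s can now be saved.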

// loadStruct loads an object of type Struct.
func loadStruct(r io.Reader) Struct {
	return Struct{
		TypeID: TypeID(loadUint(r)),
		fields: Load(r),
	}
}

// save implements Object.save.
//
// Precondition: Alloc must have been called, and the fields all filled in
// appropriately. See Alloc and Add for more details.
func (s *Struct) save(w io.Writer) {
	Uint(s.TypeID).save(w)
	Save(w, s.fields)
}

// load implements Object.load.
func (*Struct) load(r io.Reader) Object {
	s := loadStruct(r)
	return &s
}

// Object types.
//
// N.B. Be careful about changing the order or introducing new elements in the
// middle here. This is part of the wire format and shouldn't change.
const (
	typeBool Uint = iota
	typeInt
	typeUint
	typeFloat32
	typeFloat64
	typeNil
	typeRef
	typeString
	typeSlice
	typeArray
	typeMap
	typeStruct
	typeNoObjects
	typeMultipleObjects
	typeInterface
	typeComplex64
	typeComplex128
	typeType
)

// Save saves the given object.
//
// +checkescape all
//
// N.B. This function will panic on error.
func Save(w io.Writer, obj Object) {
	switch x := obj.(type) {
	case Bool:
		typeBool.save(w)
		x.save(w)
	case Int:
		typeInt.save(w)
		x.save(w)
	case Uint:
		typeUint.save(w)
		x.save(w)
	case Float32:
		typeFloat32.save(w)
		x.save(w)
	case Float64:
		typeFloat64.save(w)
		x.save(w)
	case Nil:
		typeNil.save(w)
		x.save(w)
	case *Ref:
		typeRef.save(w)
		x.save(w)
	case *String:
		typeString.save(w)
		x.save(w)
	case *Slice:
		typeSlice.save(w)
		x.save(w)
	case *Array:
		typeArray.save(w)
		x.save(w)
	case *Map:
		typeMap.save(w)
		x.save(w)
	case *Struct:
		typeStruct.save(w)
		x.save(w)
	case noObjects:
		typeNoObjects.save(w)
		x.save(w)
	case *multipleObjects:
		typeMultipleObjects.save(w)
		x.save(w)
	case *Interface:
		typeInterface.save(w)
		x.save(w)
	case *Type:
		typeType.save(w)
		x.save(w)
	case *Complex64:
		typeComplex64.save(w)
		x.save(w)
	case *Complex128:
		typeComplex128.save(w)
		x.save(w)
	default:
		panic(fmt.Errorf("unknown type: %#v", obj))
	}
}

// Load loads a new object.
//
// +checkescape all
//
// N.B. This function will panic on error.
func Load(r io.Reader) Object {
	switch hdr := loadUint(r); hdr {
	case typeBool:
		return loadBool(r)
	case typeInt:
		return loadInt(r)
	case typeUint:
		return loadUint(r)
	case typeFloat32:
		return loadFloat32(r)
	case typeFloat64:
		return loadFloat64(r)
	case typeNil:
		return loadNil(r)
	case typeRef:
		return ((*Ref)(nil)).load(r) // Escapes.
	case typeString:
		return ((*String)(nil)).load(r) // Escapes.
	case typeSlice:
		return ((*Slice)(nil)).load(r) // Escapes.
	case typeArray:
		return ((*Array)(nil)).load(r) // Escapes.
	case typeMap:
		return ((*Map)(nil)).load(r) // Escapes.
	case typeStruct:
		return ((*Struct)(nil)).load(r) // Escapes.
	case typeNoObjects: // Special for struct.
		return loadNoObjects(r)
	case typeMultipleObjects: // Special for struct.
		return ((*multipleObjects)(nil)).load(r) // Escapes.
	case typeInterface:
		return ((*Interface)(nil)).load(r) // Escapes.
	case typeComplex64:
		return ((*Complex64)(nil)).load(r) // Escapes.
	case typeComplex128:
		return ((*Complex128)(nil)).load(r) // Escapes.
	case typeType:
		return ((*Type)(nil)).load(r) // Escapes.
	default:
		// This is not a valid stream?
		panic(fmt.Errorf("unknown header: %d", hdr))
	}
}
|
||||
// LoadUint loads a single unsigned integer.
|
||||
//
|
||||
// N.B. This function will panic on error.
|
||||
func LoadUint(r io.Reader) uint64 {
|
||||
return uint64(loadUint(r))
|
||||
}
|
||||
|
||||
// SaveUint saves a single unsigned integer.
|
||||
//
|
||||
// N.B. This function will panic on error.
|
||||
func SaveUint(w io.Writer, v uint64) {
|
||||
Uint(v).save(w)
|
||||
}
|
||||
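The pieces above compose into a simple wire protocol: Save writes a Uint type tag followed by the object's own encoding, and Load dispatches on that tag. The following round-trip sketch is illustrative only (not part of this diff); it assumes it lives in this same package, since Struct.fields is unexported, and that "bytes" and "fmt" are imported.

// exampleRoundTrip is a hypothetical helper: it encodes a two-field Struct
// into an in-memory buffer and decodes it back. Alloc must precede Field,
// and Save/Load panic rather than return errors.
func exampleRoundTrip() {
	var buf bytes.Buffer

	s := Struct{TypeID: 1}
	s.Alloc(2)               // reserve two field slots (*multipleObjects)
	*s.Field(0) = Uint(42)   // Uint and Bool are Objects, per the Save switch
	*s.Field(1) = Bool(true)
	Save(&buf, &s) // writes typeStruct, then TypeID, then the fields

	loaded := Load(&buf).(*Struct) // typeStruct header dispatches to Struct.load
	fmt.Printf("fields=%d first=%v\n", loaded.Fields(), *loaded.Field(0))
}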
36
vendor/gvisor.dev/gvisor/pkg/sync/aliases.go
vendored
Normal file
@@ -0,0 +1,36 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync"
)

// Aliases of standard library types.
type (
	// Cond is an alias of sync.Cond.
	Cond = sync.Cond

	// Locker is an alias of sync.Locker.
	Locker = sync.Locker

	// Once is an alias of sync.Once.
	Once = sync.Once

	// Pool is an alias of sync.Pool.
	Pool = sync.Pool

	// WaitGroup is an alias of sync.WaitGroup.
	WaitGroup = sync.WaitGroup

	// Map is an alias of sync.Map.
	Map = sync.Map
)

// NewCond is a wrapper around sync.NewCond.
func NewCond(l Locker) *Cond {
	return sync.NewCond(l)
}
19
vendor/gvisor.dev/gvisor/pkg/sync/checklocks_off_unsafe.go
vendored
Normal file
@@ -0,0 +1,19 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !checklocks
// +build !checklocks

package sync

import (
	"unsafe"
)

// noteLock is a no-op when the checklocks build tag is not set.
func noteLock(l unsafe.Pointer) {
}

// noteUnlock is a no-op when the checklocks build tag is not set.
func noteUnlock(l unsafe.Pointer) {
}
109
vendor/gvisor.dev/gvisor/pkg/sync/checklocks_on_unsafe.go
vendored
Normal file
@@ -0,0 +1,109 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build checklocks
// +build checklocks

package sync

import (
	"fmt"
	"strings"
	"sync"
	"unsafe"

	"gvisor.dev/gvisor/pkg/goid"
)

// gLocks contains metadata about the locks held by a goroutine.
type gLocks struct {
	locksHeld []unsafe.Pointer
}

// map[goid int]*gLocks
//
// Each key may only be written by the G with the goid it refers to.
//
// Note that entries are not evicted when a G exits, causing unbounded growth
// with new G creation / destruction. If this proves problematic, entries could
// be evicted when no locks are held at the expense of more allocations when
// taking top-level locks.
var locksHeld sync.Map

func getGLocks() *gLocks {
	id := goid.Get()

	var locks *gLocks
	if l, ok := locksHeld.Load(id); ok {
		locks = l.(*gLocks)
	} else {
		locks = &gLocks{
			// Initialize space for a few locks.
			locksHeld: make([]unsafe.Pointer, 0, 8),
		}
		locksHeld.Store(id, locks)
	}

	return locks
}

// noteLock records that the calling goroutine is acquiring lock l, panicking
// if l is already held by this goroutine.
func noteLock(l unsafe.Pointer) {
	locks := getGLocks()

	for _, lock := range locks.locksHeld {
		if lock == l {
			panic(fmt.Sprintf("Deadlock on goroutine %d! Double lock of %p: %+v", goid.Get(), l, locks))
		}
	}

	// Commit only after checking for panic conditions so that this lock
	// isn't on the list if the above panic is recovered.
	locks.locksHeld = append(locks.locksHeld, l)
}

// noteUnlock forgets lock l for the calling goroutine, panicking if l was not
// recorded as held.
func noteUnlock(l unsafe.Pointer) {
	locks := getGLocks()

	if len(locks.locksHeld) == 0 {
		panic(fmt.Sprintf("Unlock of %p on goroutine %d without any locks held! All locks:\n%s", l, goid.Get(), dumpLocks()))
	}

	// Search backwards since callers are most likely to unlock in LIFO order.
	length := len(locks.locksHeld)
	for i := length - 1; i >= 0; i-- {
		if l == locks.locksHeld[i] {
			copy(locks.locksHeld[i:length-1], locks.locksHeld[i+1:length])
			// Clear last entry to ensure addr can be GC'd.
			locks.locksHeld[length-1] = nil
			locks.locksHeld = locks.locksHeld[:length-1]
			return
		}
	}

	panic(fmt.Sprintf("Unlock of %p on goroutine %d without matching lock! All locks:\n%s", l, goid.Get(), dumpLocks()))
}

func dumpLocks() string {
	var s strings.Builder
	locksHeld.Range(func(key, value any) bool {
		goid := key.(int64)
		locks := value.(*gLocks)

		// N.B. accessing gLocks of another G is fundamentally racy.

		fmt.Fprintf(&s, "goroutine %d:\n", goid)
		if len(locks.locksHeld) == 0 {
			fmt.Fprintf(&s, "\t<none>\n")
		}
		for _, lock := range locks.locksHeld {
			fmt.Fprintf(&s, "\t%p\n", lock)
		}
		fmt.Fprintf(&s, "\n")

		return true
	})

	return s.String()
}
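For context, noteLock/noteUnlock are the hooks a lock wrapper drives when the package is built with -tags=checklocks. The pairing below is an illustrative sketch only; checkedMutex is a hypothetical name, not a type from this diff, and sync.Mutex here refers to the standard library import as in the file above.

// checkedMutex shows the intended call pattern: note the lock before
// blocking (so a self-deadlock panics immediately rather than hanging) and
// forget it after releasing.
type checkedMutex struct {
	mu sync.Mutex
}

func (m *checkedMutex) Lock() {
	noteLock(unsafe.Pointer(m)) // panics if this goroutine already holds m
	m.mu.Lock()
}

func (m *checkedMutex) Unlock() {
	m.mu.Unlock()
	noteUnlock(unsafe.Pointer(m)) // panics if m was not recorded as held
}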
19
vendor/gvisor.dev/gvisor/pkg/sync/fence.go
vendored
Normal file
@@ -0,0 +1,19 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sync

// MemoryFenceReads ensures that all preceding memory loads happen before
// following memory loads.
func MemoryFenceReads()
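A load-load fence of this kind is typically paired with a sequence-count reader. The sketch below is illustrative only, not code from gvisor: readPair and its parameters are invented, and it assumes "sync/atomic" is imported. It shows where the two MemoryFenceReads calls would sit in a seqlock-style retry loop.

// readPair retries until it observes a consistent snapshot of (a, b). The
// fences keep the data loads from being reordered before the first sequence
// load or after the final re-check.
func readPair(seq *uint32, a, b *uint64) (uint64, uint64) {
	for {
		s1 := atomic.LoadUint32(seq)
		MemoryFenceReads() // data loads below must not move before s1
		x, y := *a, *b     // racy by design, as in any seqlock reader
		MemoryFenceReads() // ...nor after the re-read of the sequence
		if s1%2 == 0 && atomic.LoadUint32(seq) == s1 {
			return x, y // even sequence and unchanged: snapshot is consistent
		}
	}
}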
26
vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s
vendored
Normal file
@@ -0,0 +1,26 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

#include "textflag.h"

// func MemoryFenceReads()
TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0
	// No memory fence is required on x86. However, a compiler fence is
	// required to prevent the compiler from reordering memory accesses. The Go
	// compiler will not reorder memory accesses around a call to an assembly
	// function; compare runtime.publicationBarrier.
	RET
23
vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s
vendored
Normal file
@@ -0,0 +1,23 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build arm64
// +build arm64

#include "textflag.h"

// func MemoryFenceReads()
TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0
	DMB $0x9 // ISHLD
	RET
151
vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go
vendored
Normal file
@@ -0,0 +1,151 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sync

import (
	"fmt"
	"math"
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
)

// Gate is a synchronization primitive that allows concurrent goroutines to
// "enter" it as long as it hasn't been closed yet. Once it's been closed,
// goroutines cannot enter it anymore, but are allowed to leave, and the closer
// will be informed when all goroutines have left.
//
// Gate is similar to WaitGroup:
//
// - Gate.Enter() is analogous to WaitGroup.Add(1), but may be called even if
//   the Gate counter is 0 and fails if Gate.Close() has been called.
//
// - Gate.Leave() is equivalent to WaitGroup.Done().
//
// - Gate.Close() is analogous to WaitGroup.Wait(), but also causes future
//   calls to Gate.Enter() to fail and may only be called once, from a single
//   goroutine.
//
// This is useful, for example, in cases when a goroutine is trying to clean up
// an object for which multiple goroutines have pointers. In such a case, users
// would be required to enter and leave the Gate, and the cleaner would wait
// until all users are gone (and no new ones are allowed) before proceeding.
//
// Users:
//
//	if !g.Enter() {
//		// Gate is closed, we can't use the object.
//		return
//	}
//
//	// Do something with object.
//	[...]
//
//	g.Leave()
//
// Closer:
//
//	// Prevent new users from using the object, and wait for the existing
//	// ones to complete.
//	g.Close()
//
//	// Clean up the object.
//	[...]
type Gate struct {
	userCount int32
	closingG  uintptr
}

// preparingG is a sentinel stored in closingG while Close is still setting
// up, before gateCommit installs the closing goroutine's actual g.
const preparingG = 1

// Enter tries to enter the gate. It will succeed if it hasn't been closed yet,
// in which case the caller must eventually call Leave().
//
// This function is thread-safe.
func (g *Gate) Enter() bool {
	if atomic.AddInt32(&g.userCount, 1) > 0 {
		return true
	}
	g.leaveAfterFailedEnter()
	return false
}

// leaveAfterFailedEnter is identical to Leave, but is marked noinline to
// prevent it from being inlined into Enter, since as of this writing inlining
// Leave into Enter prevents Enter from being inlined into its callers.
//
//go:noinline
func (g *Gate) leaveAfterFailedEnter() {
	if atomic.AddInt32(&g.userCount, -1) == math.MinInt32 {
		g.leaveClosed()
	}
}

// Leave leaves the gate. This must only be called after a successful call to
// Enter(). If the gate has been closed and this is the last one inside the
// gate, it will notify the closer that the gate is done.
//
// This function is thread-safe.
func (g *Gate) Leave() {
	if atomic.AddInt32(&g.userCount, -1) == math.MinInt32 {
		g.leaveClosed()
	}
}

func (g *Gate) leaveClosed() {
	if atomic.LoadUintptr(&g.closingG) == 0 {
		return
	}
	if g := atomic.SwapUintptr(&g.closingG, 0); g > preparingG {
		goready(g, 0)
	}
}

// Close closes the gate, causing future calls to Enter to fail, and waits
// until all goroutines that are currently inside the gate leave before
// returning.
//
// Only one goroutine can call this function.
func (g *Gate) Close() {
	if atomic.LoadInt32(&g.userCount) == math.MinInt32 {
		// The gate is already closed, with no goroutines inside. For legacy
		// reasons, we have to allow Close to be called again in this case.
		return
	}
	if v := atomic.AddInt32(&g.userCount, math.MinInt32); v == math.MinInt32 {
		// userCount was already 0.
		return
	} else if v >= 0 {
		panic("concurrent Close of sync.Gate")
	}

	if g := atomic.SwapUintptr(&g.closingG, preparingG); g != 0 {
		panic(fmt.Sprintf("invalid sync.Gate.closingG during Close: %#x", g))
	}
	if atomic.LoadInt32(&g.userCount) == math.MinInt32 {
		// The last call to Leave arrived while we were setting up closingG.
		return
	}
	// WaitReasonSemacquire/TraceBlockSync are consistent with WaitGroup.
	gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceBlockSync, 0)
}

//go:norace
//go:nosplit
func gateCommit(g uintptr, closingG unsafe.Pointer) bool {
	return RaceUncheckedAtomicCompareAndSwapUintptr((*uintptr)(closingG), preparingG, g)
}
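Putting the Users and Closer halves of the doc comment together, a complete shape might look like the sketch below. It is illustrative only: exampleGate and the worker count are invented, and WaitGroup is the alias from aliases.go above.

// exampleGate launches workers that each try to enter the gate before using
// a shared object, while the closer waits them out before tearing it down.
func exampleGate() {
	var g Gate
	var wg WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if !g.Enter() {
				return // gate already closed; the object is off-limits
			}
			// ... use the shared object ...
			g.Leave()
		}()
	}
	g.Close() // blocks until every goroutine inside the gate has left
	// ... safe to tear down the shared object here ...
	wg.Wait()
}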
18
vendor/gvisor.dev/gvisor/pkg/sync/goyield_go113_unsafe.go
vendored
Normal file
@@ -0,0 +1,18 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.13 && !go1.14
// +build go1.13,!go1.14

package sync

import (
	"runtime"
)

func goyield() {
	// goyield is not available until Go 1.14.
	runtime.Gosched()
}
20
vendor/gvisor.dev/gvisor/pkg/sync/goyield_unsafe.go
vendored
Normal file
@@ -0,0 +1,20 @@
// Copyright 2020 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.14
// +build go1.14

// //go:linkname directives type-checked by checklinkname. Any other
// non-linkname assumptions outside the Go 1 compatibility guarantee should
// have an accompanying vet check or version guard build tag.

package sync

import (
	_ "unsafe" // for go:linkname
)

//go:linkname goyield runtime.goyield
func goyield()
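As a rough intuition, runtime.goyield yields the processor to another runnable goroutine more cheaply than runtime.Gosched (it keeps the current G on the local run queue rather than the global one). A hypothetical caller, purely for illustration:

// spinUntil polls cond, yielding between polls so the goroutine that will
// make cond true gets a chance to run. Not a function from this diff.
func spinUntil(cond func() bool) {
	for !cond() {
		goyield()
	}
}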
445
vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_ancestors_unsafe.go
vendored
Normal file
@@ -0,0 +1,445 @@
package locking

import (
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

const (
	// ShardOrder is an optional parameter specifying the base-2 log of the
	// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
	// unnecessary synchronization between unrelated concurrent operations,
	// improving performance for write-heavy workloads, but increase memory
	// usage for small maps.
	ancestorsShardOrder = 0
)

// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
type ancestorsHasher struct {
	ancestorsdefaultHasher
}

// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type ancestorsdefaultHasher struct {
	fn   func(unsafe.Pointer, uintptr) uintptr
	seed uintptr
}

// Init initializes the Hasher.
func (h *ancestorsdefaultHasher) Init() {
	h.fn = sync.MapKeyHasher(map[*MutexClass]*string(nil))
	h.seed = sync.RandUintptr()
}

// Hash returns the hash value for the given Key.
func (h *ancestorsdefaultHasher) Hash(key *MutexClass) uintptr {
	return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}

var ancestorshasher ancestorsHasher

func init() {
	ancestorshasher.Init()
}

// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
type ancestorsAtomicPtrMap struct {
	shards [1 << ancestorsShardOrder]ancestorsapmShard
}

func (m *ancestorsAtomicPtrMap) shard(hash uintptr) *ancestorsapmShard {
	// Go defines right shifts >= width of shifted unsigned operand as 0, so
	// this is correct even if ShardOrder is 0 (although nogo complains because
	// nogo is dumb).
	const indexLSB = unsafe.Sizeof(uintptr(0))*8 - ancestorsShardOrder
	index := hash >> indexLSB
	return (*ancestorsapmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(ancestorsapmShard{}))))
}

type ancestorsapmShard struct {
	ancestorsapmShardMutationData
	_ [ancestorsapmShardMutationDataPadding]byte
	ancestorsapmShardLookupData
	_ [ancestorsapmShardLookupDataPadding]byte
}

type ancestorsapmShardMutationData struct {
	dirtyMu  sync.Mutex // serializes slot transitions out of empty
	dirty    uintptr    // # slots with val != nil
	count    uintptr    // # slots with val != nil and val != tombstone()
	rehashMu sync.Mutex // serializes rehashing
}

type ancestorsapmShardLookupData struct {
	seq   sync.SeqCount  // allows atomic reads of slots+mask
	slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
	mask  uintptr        // always (a power of 2) - 1; protected by rehashMu/seq
}

const (
	ancestorscacheLineBytes = 64
	// Cache line padding is enabled if sharding is.
	ancestorsapmEnablePadding = (ancestorsShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
	// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
	// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
	ancestorsapmShardMutationDataRequiredPadding = ancestorscacheLineBytes - (((unsafe.Sizeof(ancestorsapmShardMutationData{}) - 1) % ancestorscacheLineBytes) + 1)
	ancestorsapmShardMutationDataPadding         = ancestorsapmEnablePadding * ancestorsapmShardMutationDataRequiredPadding
	ancestorsapmShardLookupDataRequiredPadding   = ancestorscacheLineBytes - (((unsafe.Sizeof(ancestorsapmShardLookupData{}) - 1) % ancestorscacheLineBytes) + 1)
	ancestorsapmShardLookupDataPadding           = ancestorsapmEnablePadding * ancestorsapmShardLookupDataRequiredPadding

	// These define fractional thresholds for when apmShard.rehash() is called
	// (i.e. the load factor) and when it rehashes to a larger table
	// respectively. They are chosen such that the rehash threshold = the
	// expansion threshold + 1/2, so that when reuse of deleted slots is rare
	// or non-existent, rehashing occurs after the insertion of at least 1/2
	// the table's size in new entries, which is acceptably infrequent.
	ancestorsapmRehashThresholdNum    = 2
	ancestorsapmRehashThresholdDen    = 3
	ancestorsapmExpansionThresholdNum = 1
	ancestorsapmExpansionThresholdDen = 6
)

type ancestorsapmSlot struct {
	// slot states are indicated by val:
	//
	// * Empty: val == nil; key is meaningless. May transition to full or
	// evacuated with dirtyMu locked.
	//
	// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
	// is the Value mapped to key. May transition to deleted or evacuated.
	//
	// * Deleted: val == tombstone(); key is still immutable. key is mapped to
	// no Value. May transition to full or evacuated.
	//
	// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
	// slots that have already been moved, requiring readers to wait for
	// rehashing to complete and use the new table. Terminal state.
	//
	// Note that once val is non-nil, it cannot become nil again. That is, the
	// transition from empty to non-empty is irreversible for a given slot;
	// the only way to create more empty slots is by rehashing.
	val unsafe.Pointer
	key *MutexClass
}

func ancestorsapmSlotAt(slots unsafe.Pointer, pos uintptr) *ancestorsapmSlot {
	return (*ancestorsapmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(ancestorsapmSlot{})))
}

var ancestorstombstoneObj byte

func ancestorstombstone() unsafe.Pointer {
	return unsafe.Pointer(&ancestorstombstoneObj)
}

var ancestorsevacuatedObj byte

func ancestorsevacuated() unsafe.Pointer {
	return unsafe.Pointer(&ancestorsevacuatedObj)
}

// Load returns the Value stored in m for key.
func (m *ancestorsAtomicPtrMap) Load(key *MutexClass) *string {
	hash := ancestorshasher.Hash(key)
	shard := m.shard(hash)

retry:
	epoch := shard.seq.BeginRead()
	slots := atomic.LoadPointer(&shard.slots)
	mask := atomic.LoadUintptr(&shard.mask)
	if !shard.seq.ReadOk(epoch) {
		goto retry
	}
	if slots == nil {
		return nil
	}

	i := hash & mask
	inc := uintptr(1)
	for {
		slot := ancestorsapmSlotAt(slots, i)
		slotVal := atomic.LoadPointer(&slot.val)
		if slotVal == nil {
			return nil
		}
		if slotVal == ancestorsevacuated() {
			goto retry
		}
		if slot.key == key {
			if slotVal == ancestorstombstone() {
				return nil
			}
			return (*string)(slotVal)
		}
		i = (i + inc) & mask
		inc++
	}
}

// Store stores the Value val for key.
func (m *ancestorsAtomicPtrMap) Store(key *MutexClass, val *string) {
	m.maybeCompareAndSwap(key, false, nil, val)
}

// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *ancestorsAtomicPtrMap) Swap(key *MutexClass, val *string) *string {
	return m.maybeCompareAndSwap(key, false, nil, val)
}

// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *ancestorsAtomicPtrMap) CompareAndSwap(key *MutexClass, oldVal, newVal *string) *string {
	return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}

func (m *ancestorsAtomicPtrMap) maybeCompareAndSwap(key *MutexClass, compare bool, typedOldVal, typedNewVal *string) *string {
	hash := ancestorshasher.Hash(key)
	shard := m.shard(hash)
	oldVal := ancestorstombstone()
	if typedOldVal != nil {
		oldVal = unsafe.Pointer(typedOldVal)
	}
	newVal := ancestorstombstone()
	if typedNewVal != nil {
		newVal = unsafe.Pointer(typedNewVal)
	}

retry:
	epoch := shard.seq.BeginRead()
	slots := atomic.LoadPointer(&shard.slots)
	mask := atomic.LoadUintptr(&shard.mask)
	if !shard.seq.ReadOk(epoch) {
		goto retry
	}
	if slots == nil {
		if (compare && oldVal != ancestorstombstone()) || newVal == ancestorstombstone() {
			return nil
		}
		shard.rehash(nil)
		goto retry
	}

	i := hash & mask
	inc := uintptr(1)
	for {
		slot := ancestorsapmSlotAt(slots, i)
		slotVal := atomic.LoadPointer(&slot.val)
		if slotVal == nil {
			if (compare && oldVal != ancestorstombstone()) || newVal == ancestorstombstone() {
				return nil
			}
			shard.dirtyMu.Lock()
			slotVal = atomic.LoadPointer(&slot.val)
			if slotVal == nil {
				if dirty, capacity := shard.dirty+1, mask+1; dirty*ancestorsapmRehashThresholdDen >= capacity*ancestorsapmRehashThresholdNum {
					shard.dirtyMu.Unlock()
					shard.rehash(slots)
					goto retry
				}
				slot.key = key
				atomic.StorePointer(&slot.val, newVal)
				shard.dirty++
				atomic.AddUintptr(&shard.count, 1)
				shard.dirtyMu.Unlock()
				return nil
			}
			shard.dirtyMu.Unlock()
		}
		if slotVal == ancestorsevacuated() {
			goto retry
		}
		if slot.key == key {
			for {
				if (compare && oldVal != slotVal) || newVal == slotVal {
					if slotVal == ancestorstombstone() {
						return nil
					}
					return (*string)(slotVal)
				}
				if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
					if slotVal == ancestorstombstone() {
						atomic.AddUintptr(&shard.count, 1)
						return nil
					}
					if newVal == ancestorstombstone() {
						atomic.AddUintptr(&shard.count, ^uintptr(0))
					}
					return (*string)(slotVal)
				}
				slotVal = atomic.LoadPointer(&slot.val)
				if slotVal == ancestorsevacuated() {
					goto retry
				}
			}
		}

		i = (i + inc) & mask
		inc++
	}
}

// rehash is marked nosplit to avoid preemption during table copying.
//
//go:nosplit
func (shard *ancestorsapmShard) rehash(oldSlots unsafe.Pointer) {
	shard.rehashMu.Lock()
	defer shard.rehashMu.Unlock()

	if shard.slots != oldSlots {
		return
	}

	newSize := uintptr(8)
	if oldSlots != nil {
		oldSize := shard.mask + 1
		newSize = oldSize
		if count := atomic.LoadUintptr(&shard.count) + 1; count*ancestorsapmExpansionThresholdDen > oldSize*ancestorsapmExpansionThresholdNum {
			newSize *= 2
		}
	}

	newSlotsSlice := make([]ancestorsapmSlot, newSize)
	newSlots := unsafe.Pointer(&newSlotsSlice[0])
	newMask := newSize - 1

	shard.dirtyMu.Lock()
	shard.seq.BeginWrite()

	if oldSlots != nil {
		realCount := uintptr(0)
		oldMask := shard.mask
		for i := uintptr(0); i <= oldMask; i++ {
			oldSlot := ancestorsapmSlotAt(oldSlots, i)
			val := atomic.SwapPointer(&oldSlot.val, ancestorsevacuated())
			if val == nil || val == ancestorstombstone() {
				continue
			}
			hash := ancestorshasher.Hash(oldSlot.key)
			j := hash & newMask
			inc := uintptr(1)
			for {
				newSlot := ancestorsapmSlotAt(newSlots, j)
				if newSlot.val == nil {
					newSlot.val = val
					newSlot.key = oldSlot.key
					break
				}
				j = (j + inc) & newMask
				inc++
			}
			realCount++
		}
		shard.dirty = realCount
	}

	atomic.StorePointer(&shard.slots, newSlots)
	atomic.StoreUintptr(&shard.mask, newMask)

	shard.seq.EndWrite()
	shard.dirtyMu.Unlock()
}

// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
func (m *ancestorsAtomicPtrMap) Range(f func(key *MutexClass, val *string) bool) {
	for si := 0; si < len(m.shards); si++ {
		shard := &m.shards[si]
		if !shard.doRange(f) {
			return
		}
	}
}

func (shard *ancestorsapmShard) doRange(f func(key *MutexClass, val *string) bool) bool {
	shard.rehashMu.Lock()
	defer shard.rehashMu.Unlock()
	slots := shard.slots
	if slots == nil {
		return true
	}
	mask := shard.mask
	for i := uintptr(0); i <= mask; i++ {
		slot := ancestorsapmSlotAt(slots, i)
		slotVal := atomic.LoadPointer(&slot.val)
		if slotVal == nil || slotVal == ancestorstombstone() {
			continue
		}
		if !f(slot.key, (*string)(slotVal)) {
			return false
		}
	}
	return true
}

// RangeRepeatable is like Range, but:
//
// - RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// - It is safe for f to call other methods on m.
func (m *ancestorsAtomicPtrMap) RangeRepeatable(f func(key *MutexClass, val *string) bool) {
	for si := 0; si < len(m.shards); si++ {
		shard := &m.shards[si]

	retry:
		epoch := shard.seq.BeginRead()
		slots := atomic.LoadPointer(&shard.slots)
		mask := atomic.LoadUintptr(&shard.mask)
		if !shard.seq.ReadOk(epoch) {
			goto retry
		}
		if slots == nil {
			continue
		}

		for i := uintptr(0); i <= mask; i++ {
			slot := ancestorsapmSlotAt(slots, i)
			slotVal := atomic.LoadPointer(&slot.val)
			if slotVal == ancestorsevacuated() {
				goto retry
			}
			if slotVal == nil || slotVal == ancestorstombstone() {
				continue
			}
			if !f(slot.key, (*string)(slotVal)) {
				return
			}
		}
	}
}
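Since every instantiated name here is unexported, this map is only usable from within package locking. A short illustrative sketch of the basic operations (exampleAncestorsMap is invented; it assumes a *MutexClass value c from elsewhere in the package):

// exampleAncestorsMap exercises Store/Load/Range. The zero-value map is
// ready for use; storing nil deletes (tombstones) an entry.
func exampleAncestorsMap(c *MutexClass) {
	var m ancestorsAtomicPtrMap

	name := "parent"
	m.Store(c, &name) // insert or overwrite
	if v := m.Load(c); v != nil {
		_ = *v // "parent"
	}
	m.Range(func(k *MutexClass, v *string) bool {
		return true // keep iterating; f must not call other methods on m
	})
	m.Store(c, nil) // delete the entry
}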
445
vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_goroutine_unsafe.go
vendored
Normal file
@@ -0,0 +1,445 @@
package locking

import (
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

const (
	// ShardOrder is an optional parameter specifying the base-2 log of the
	// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
	// unnecessary synchronization between unrelated concurrent operations,
	// improving performance for write-heavy workloads, but increase memory
	// usage for small maps.
	goroutineLocksShardOrder = 0
)

// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
type goroutineLocksHasher struct {
	goroutineLocksdefaultHasher
}

// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type goroutineLocksdefaultHasher struct {
	fn   func(unsafe.Pointer, uintptr) uintptr
	seed uintptr
}

// Init initializes the Hasher.
func (h *goroutineLocksdefaultHasher) Init() {
	h.fn = sync.MapKeyHasher(map[int64]*goroutineLocks(nil))
	h.seed = sync.RandUintptr()
}

// Hash returns the hash value for the given Key.
func (h *goroutineLocksdefaultHasher) Hash(key int64) uintptr {
	return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}

var goroutineLockshasher goroutineLocksHasher

func init() {
	goroutineLockshasher.Init()
}

// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
type goroutineLocksAtomicPtrMap struct {
	shards [1 << goroutineLocksShardOrder]goroutineLocksapmShard
}

func (m *goroutineLocksAtomicPtrMap) shard(hash uintptr) *goroutineLocksapmShard {
	// Go defines right shifts >= width of shifted unsigned operand as 0, so
	// this is correct even if ShardOrder is 0 (although nogo complains because
	// nogo is dumb).
	const indexLSB = unsafe.Sizeof(uintptr(0))*8 - goroutineLocksShardOrder
	index := hash >> indexLSB
	return (*goroutineLocksapmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(goroutineLocksapmShard{}))))
}

type goroutineLocksapmShard struct {
	goroutineLocksapmShardMutationData
	_ [goroutineLocksapmShardMutationDataPadding]byte
	goroutineLocksapmShardLookupData
	_ [goroutineLocksapmShardLookupDataPadding]byte
}

type goroutineLocksapmShardMutationData struct {
	dirtyMu  sync.Mutex // serializes slot transitions out of empty
	dirty    uintptr    // # slots with val != nil
	count    uintptr    // # slots with val != nil and val != tombstone()
	rehashMu sync.Mutex // serializes rehashing
}

type goroutineLocksapmShardLookupData struct {
	seq   sync.SeqCount  // allows atomic reads of slots+mask
	slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
	mask  uintptr        // always (a power of 2) - 1; protected by rehashMu/seq
}

const (
	goroutineLockscacheLineBytes = 64
	// Cache line padding is enabled if sharding is.
	goroutineLocksapmEnablePadding = (goroutineLocksShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
	// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
	// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
	goroutineLocksapmShardMutationDataRequiredPadding = goroutineLockscacheLineBytes - (((unsafe.Sizeof(goroutineLocksapmShardMutationData{}) - 1) % goroutineLockscacheLineBytes) + 1)
	goroutineLocksapmShardMutationDataPadding         = goroutineLocksapmEnablePadding * goroutineLocksapmShardMutationDataRequiredPadding
	goroutineLocksapmShardLookupDataRequiredPadding   = goroutineLockscacheLineBytes - (((unsafe.Sizeof(goroutineLocksapmShardLookupData{}) - 1) % goroutineLockscacheLineBytes) + 1)
	goroutineLocksapmShardLookupDataPadding           = goroutineLocksapmEnablePadding * goroutineLocksapmShardLookupDataRequiredPadding

	// These define fractional thresholds for when apmShard.rehash() is called
	// (i.e. the load factor) and when it rehashes to a larger table
	// respectively. They are chosen such that the rehash threshold = the
	// expansion threshold + 1/2, so that when reuse of deleted slots is rare
	// or non-existent, rehashing occurs after the insertion of at least 1/2
	// the table's size in new entries, which is acceptably infrequent.
	goroutineLocksapmRehashThresholdNum    = 2
	goroutineLocksapmRehashThresholdDen    = 3
	goroutineLocksapmExpansionThresholdNum = 1
	goroutineLocksapmExpansionThresholdDen = 6
)

type goroutineLocksapmSlot struct {
	// slot states are indicated by val:
	//
	// * Empty: val == nil; key is meaningless. May transition to full or
	// evacuated with dirtyMu locked.
	//
	// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
	// is the Value mapped to key. May transition to deleted or evacuated.
	//
	// * Deleted: val == tombstone(); key is still immutable. key is mapped to
	// no Value. May transition to full or evacuated.
	//
	// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
	// slots that have already been moved, requiring readers to wait for
	// rehashing to complete and use the new table. Terminal state.
	//
	// Note that once val is non-nil, it cannot become nil again. That is, the
	// transition from empty to non-empty is irreversible for a given slot;
	// the only way to create more empty slots is by rehashing.
	val unsafe.Pointer
	key int64
}

func goroutineLocksapmSlotAt(slots unsafe.Pointer, pos uintptr) *goroutineLocksapmSlot {
	return (*goroutineLocksapmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(goroutineLocksapmSlot{})))
}

var goroutineLockstombstoneObj byte

func goroutineLockstombstone() unsafe.Pointer {
	return unsafe.Pointer(&goroutineLockstombstoneObj)
}

var goroutineLocksevacuatedObj byte

func goroutineLocksevacuated() unsafe.Pointer {
	return unsafe.Pointer(&goroutineLocksevacuatedObj)
}

// Load returns the Value stored in m for key.
func (m *goroutineLocksAtomicPtrMap) Load(key int64) *goroutineLocks {
	hash := goroutineLockshasher.Hash(key)
	shard := m.shard(hash)

retry:
	epoch := shard.seq.BeginRead()
	slots := atomic.LoadPointer(&shard.slots)
	mask := atomic.LoadUintptr(&shard.mask)
	if !shard.seq.ReadOk(epoch) {
		goto retry
	}
	if slots == nil {
		return nil
	}

	i := hash & mask
	inc := uintptr(1)
	for {
		slot := goroutineLocksapmSlotAt(slots, i)
		slotVal := atomic.LoadPointer(&slot.val)
		if slotVal == nil {
			return nil
		}
		if slotVal == goroutineLocksevacuated() {
			goto retry
		}
		if slot.key == key {
			if slotVal == goroutineLockstombstone() {
				return nil
			}
			return (*goroutineLocks)(slotVal)
		}
		i = (i + inc) & mask
		inc++
	}
}

// Store stores the Value val for key.
func (m *goroutineLocksAtomicPtrMap) Store(key int64, val *goroutineLocks) {
	m.maybeCompareAndSwap(key, false, nil, val)
}

// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *goroutineLocksAtomicPtrMap) Swap(key int64, val *goroutineLocks) *goroutineLocks {
	return m.maybeCompareAndSwap(key, false, nil, val)
}

// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *goroutineLocksAtomicPtrMap) CompareAndSwap(key int64, oldVal, newVal *goroutineLocks) *goroutineLocks {
	return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}

func (m *goroutineLocksAtomicPtrMap) maybeCompareAndSwap(key int64, compare bool, typedOldVal, typedNewVal *goroutineLocks) *goroutineLocks {
	hash := goroutineLockshasher.Hash(key)
	shard := m.shard(hash)
	oldVal := goroutineLockstombstone()
	if typedOldVal != nil {
		oldVal = unsafe.Pointer(typedOldVal)
	}
	newVal := goroutineLockstombstone()
	if typedNewVal != nil {
		newVal = unsafe.Pointer(typedNewVal)
	}

retry:
	epoch := shard.seq.BeginRead()
	slots := atomic.LoadPointer(&shard.slots)
	mask := atomic.LoadUintptr(&shard.mask)
	if !shard.seq.ReadOk(epoch) {
		goto retry
	}
	if slots == nil {
		if (compare && oldVal != goroutineLockstombstone()) || newVal == goroutineLockstombstone() {
			return nil
		}
		shard.rehash(nil)
		goto retry
	}

	i := hash & mask
	inc := uintptr(1)
	for {
		slot := goroutineLocksapmSlotAt(slots, i)
		slotVal := atomic.LoadPointer(&slot.val)
		if slotVal == nil {
			if (compare && oldVal != goroutineLockstombstone()) || newVal == goroutineLockstombstone() {
				return nil
			}
			shard.dirtyMu.Lock()
			slotVal = atomic.LoadPointer(&slot.val)
			if slotVal == nil {
				if dirty, capacity := shard.dirty+1, mask+1; dirty*goroutineLocksapmRehashThresholdDen >= capacity*goroutineLocksapmRehashThresholdNum {
					shard.dirtyMu.Unlock()
					shard.rehash(slots)
					goto retry
				}
				slot.key = key
				atomic.StorePointer(&slot.val, newVal)
				shard.dirty++
				atomic.AddUintptr(&shard.count, 1)
				shard.dirtyMu.Unlock()
				return nil
			}
			shard.dirtyMu.Unlock()
		}
		if slotVal == goroutineLocksevacuated() {
			goto retry
		}
		if slot.key == key {
			for {
				if (compare && oldVal != slotVal) || newVal == slotVal {
					if slotVal == goroutineLockstombstone() {
						return nil
					}
					return (*goroutineLocks)(slotVal)
				}
				if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
					if slotVal == goroutineLockstombstone() {
						atomic.AddUintptr(&shard.count, 1)
						return nil
					}
					if newVal == goroutineLockstombstone() {
						atomic.AddUintptr(&shard.count, ^uintptr(0))
					}
					return (*goroutineLocks)(slotVal)
				}
				slotVal = atomic.LoadPointer(&slot.val)
				if slotVal == goroutineLocksevacuated() {
					goto retry
				}
			}
		}

		i = (i + inc) & mask
		inc++
	}
}

// rehash is marked nosplit to avoid preemption during table copying.
//
//go:nosplit
func (shard *goroutineLocksapmShard) rehash(oldSlots unsafe.Pointer) {
	shard.rehashMu.Lock()
	defer shard.rehashMu.Unlock()

	if shard.slots != oldSlots {
		return
	}

	newSize := uintptr(8)
	if oldSlots != nil {
		oldSize := shard.mask + 1
		newSize = oldSize
		if count := atomic.LoadUintptr(&shard.count) + 1; count*goroutineLocksapmExpansionThresholdDen > oldSize*goroutineLocksapmExpansionThresholdNum {
			newSize *= 2
		}
	}

	newSlotsSlice := make([]goroutineLocksapmSlot, newSize)
	newSlots := unsafe.Pointer(&newSlotsSlice[0])
	newMask := newSize - 1

	shard.dirtyMu.Lock()
	shard.seq.BeginWrite()

	if oldSlots != nil {
		realCount := uintptr(0)
		oldMask := shard.mask
		for i := uintptr(0); i <= oldMask; i++ {
			oldSlot := goroutineLocksapmSlotAt(oldSlots, i)
			val := atomic.SwapPointer(&oldSlot.val, goroutineLocksevacuated())
			if val == nil || val == goroutineLockstombstone() {
				continue
			}
			hash := goroutineLockshasher.Hash(oldSlot.key)
			j := hash & newMask
			inc := uintptr(1)
			for {
				newSlot := goroutineLocksapmSlotAt(newSlots, j)
				if newSlot.val == nil {
					newSlot.val = val
					newSlot.key = oldSlot.key
					break
				}
				j = (j + inc) & newMask
				inc++
			}
			realCount++
		}
		shard.dirty = realCount
	}

	atomic.StorePointer(&shard.slots, newSlots)
	atomic.StoreUintptr(&shard.mask, newMask)

	shard.seq.EndWrite()
	shard.dirtyMu.Unlock()
}

// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
func (m *goroutineLocksAtomicPtrMap) Range(f func(key int64, val *goroutineLocks) bool) {
	for si := 0; si < len(m.shards); si++ {
		shard := &m.shards[si]
		if !shard.doRange(f) {
			return
		}
	}
}

func (shard *goroutineLocksapmShard) doRange(f func(key int64, val *goroutineLocks) bool) bool {
	shard.rehashMu.Lock()
	defer shard.rehashMu.Unlock()
	slots := shard.slots
	if slots == nil {
		return true
	}
	mask := shard.mask
	for i := uintptr(0); i <= mask; i++ {
		slot := goroutineLocksapmSlotAt(slots, i)
		slotVal := atomic.LoadPointer(&slot.val)
		if slotVal == nil || slotVal == goroutineLockstombstone() {
			continue
		}
		if !f(slot.key, (*goroutineLocks)(slotVal)) {
			return false
		}
	}
	return true
}

// RangeRepeatable is like Range, but:
//
// - RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// - It is safe for f to call other methods on m.
func (m *goroutineLocksAtomicPtrMap) RangeRepeatable(f func(key int64, val *goroutineLocks) bool) {
	for si := 0; si < len(m.shards); si++ {
		shard := &m.shards[si]

	retry:
		epoch := shard.seq.BeginRead()
		slots := atomic.LoadPointer(&shard.slots)
		mask := atomic.LoadUintptr(&shard.mask)
		if !shard.seq.ReadOk(epoch) {
			goto retry
		}
		if slots == nil {
			continue
		}

		for i := uintptr(0); i <= mask; i++ {
			slot := goroutineLocksapmSlotAt(slots, i)
			slotVal := atomic.LoadPointer(&slot.val)
			if slotVal == goroutineLocksevacuated() {
				goto retry
			}
			if slotVal == nil || slotVal == goroutineLockstombstone() {
				continue
			}
			if !f(slot.key, (*goroutineLocks)(slotVal)) {
				return
			}
		}
	}
}
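This goroutine-keyed instantiation behaves identically to the ancestors one; only the Key (an int64 goid) and Value (*goroutineLocks, defined elsewhere in the package) differ. One more illustrative sketch, showing the CompareAndSwap contract: it returns the previous value whether or not the swap happened, so a nil result means our value was installed. exampleMap and exampleInsertOnce are invented names.

var exampleMap goroutineLocksAtomicPtrMap

// exampleInsertOnce installs fresh for id only if no value is present,
// returning whichever value ends up in the map.
func exampleInsertOnce(id int64) *goroutineLocks {
	fresh := &goroutineLocks{}
	// A nil oldVal means "only install if absent".
	if prev := exampleMap.CompareAndSwap(id, nil, fresh); prev != nil {
		return prev // another goroutine won the race; fresh is discarded
	}
	return fresh // we installed fresh
}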
Some files were not shown because too many files have changed in this diff.