Update dependencies
vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go (vendored, normal file, 657 lines)
@@ -0,0 +1,657 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package buffer provides the implementation of a non-contiguous buffer that
// is reference counted, pooled, and copy-on-write. It allows O(1) append
// and prepend operations.
package buffer

import (
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/tcpip/checksum"
)

// Buffer is a non-linear buffer.
//
// +stateify savable
type Buffer struct {
	data ViewList `state:".([]byte)"`
	size int64
}

func (b *Buffer) removeView(v *View) {
	b.data.Remove(v)
	v.Release()
}

// MakeWithData creates a new Buffer initialized with given data. This function
// should be used with caution to avoid unnecessary []byte allocations. When in
// doubt use MakeWithView to maximize chunk reuse.
func MakeWithData(b []byte) Buffer {
	buf := Buffer{}
	if len(b) == 0 {
		return buf
	}
	v := NewViewWithData(b)
	buf.Append(v)
	return buf
}

// MakeWithView creates a new Buffer initialized with given view. This function
// takes ownership of v.
func MakeWithView(v *View) Buffer {
	if v == nil {
		return Buffer{}
	}
	b := Buffer{
		size: int64(v.Size()),
	}
	if b.size == 0 {
		v.Release()
		return b
	}
	b.data.PushBack(v)
	return b
}

// Release frees all resources held by b.
func (b *Buffer) Release() {
	for v := b.data.Front(); v != nil; v = b.data.Front() {
		b.removeView(v)
	}
	b.size = 0
}

// TrimFront removes the first count bytes from the buffer.
func (b *Buffer) TrimFront(count int64) {
	if count >= b.size {
		b.advanceRead(b.size)
	} else {
		b.advanceRead(count)
	}
}

// ReadAt implements io.ReaderAt.ReadAt.
func (b *Buffer) ReadAt(p []byte, offset int64) (int, error) {
	var (
		skipped int64
		done    int64
	)
	for v := b.data.Front(); v != nil && done < int64(len(p)); v = v.Next() {
		needToSkip := int(offset - skipped)
		if sz := v.Size(); sz <= needToSkip {
			skipped += int64(sz)
			continue
		}

		// Actually read data.
		n := copy(p[done:], v.AsSlice()[needToSkip:])
		skipped += int64(needToSkip)
		done += int64(n)
	}
	if int(done) < len(p) || offset+done == b.size {
		return int(done), io.EOF
	}
	return int(done), nil
}

// advanceRead advances the Buffer's read index.
//
// Precondition: there must be sufficient bytes in the buffer.
func (b *Buffer) advanceRead(count int64) {
	for v := b.data.Front(); v != nil && count > 0; {
		sz := int64(v.Size())
		if sz > count {
			// There is still data for reading.
			v.TrimFront(int(count))
			b.size -= count
			count = 0
			return
		}

		// Consume the whole view.
		oldView := v
		v = v.Next() // Iterate.
		b.removeView(oldView)

		// Update counts.
		count -= sz
		b.size -= sz
	}
	if count > 0 {
		panic(fmt.Sprintf("advanceRead still has %d bytes remaining", count))
	}
}

// Truncate truncates the Buffer to the given length.
//
// This will not grow the Buffer, only shrink it. If a length is passed that is
// greater than the current size of the Buffer, then nothing will happen.
//
// Precondition: length must be >= 0.
func (b *Buffer) Truncate(length int64) {
	if length < 0 {
		panic("negative length provided")
	}
	if length >= b.size {
		return // Nothing to do.
	}
	for v := b.data.Back(); v != nil && b.size > length; v = b.data.Back() {
		sz := int64(v.Size())
		if after := b.size - sz; after < length {
			// Truncate the buffer locally.
			left := (length - after)
			v.write = v.read + int(left)
			b.size = length
			break
		}

		// Drop the buffer completely; see above.
		b.removeView(v)
		b.size -= sz
	}
}

// GrowTo grows the given Buffer to the number of bytes, which will be appended.
// If zero is true, all these bytes will be zero. If zero is false, then this is
// the caller's responsibility.
//
// Precondition: length must be >= 0.
func (b *Buffer) GrowTo(length int64, zero bool) {
	if length < 0 {
		panic("negative length provided")
	}
	for b.size < length {
		v := b.data.Back()

		// Is there some space in the last buffer?
		if v.Full() {
			v = NewView(int(length - b.size))
			b.data.PushBack(v)
		}

		// Write up to length bytes.
		sz := v.AvailableSize()
		if int64(sz) > length-b.size {
			sz = int(length - b.size)
		}

		// Zero the written section.
		if zero {
			clear(v.chunk.data[v.write : v.write+sz])
		}

		// Advance the index.
		v.Grow(sz)
		b.size += int64(sz)
	}
}

// Prepend prepends the given data. Prepend takes ownership of src.
func (b *Buffer) Prepend(src *View) error {
	if src == nil {
		return nil
	}
	if src.Size() == 0 {
		src.Release()
		return nil
	}
	// If the first buffer does not have room just prepend the view.
	v := b.data.Front()
	if v == nil || v.read == 0 {
		b.prependOwned(src)
		return nil
	}

	// If there's room at the front and we won't incur a copy by writing to this
	// view, fill in the extra room first.
	if !v.sharesChunk() {
		avail := v.read
		vStart := 0
		srcStart := src.Size() - avail
		if avail > src.Size() {
			vStart = avail - src.Size()
			srcStart = 0
		}
		// Save the write index and restore it after.
		old := v.write
		v.read = vStart
		n, err := v.WriteAt(src.AsSlice()[srcStart:], 0)
		if err != nil {
			return fmt.Errorf("could not write to view during append: %w", err)
		}
		b.size += int64(n)
		v.write = old
		src.write = srcStart

		// If there's no more to be written, then we're done.
		if src.Size() == 0 {
			src.Release()
			return nil
		}
	}

	// Otherwise, just prepend the view.
	b.prependOwned(src)
	return nil
}

// Append appends the given data. Append takes ownership of src.
func (b *Buffer) Append(src *View) error {
	if src == nil {
		return nil
	}
	if src.Size() == 0 {
		src.Release()
		return nil
	}
	// If the last buffer is full, just append the view.
	v := b.data.Back()
	if v.Full() {
		b.appendOwned(src)
		return nil
	}

	// If a write won't incur a copy, then fill the back of the existing last
	// chunk.
	if !v.sharesChunk() {
		writeSz := src.Size()
		if src.Size() > v.AvailableSize() {
			writeSz = v.AvailableSize()
		}
		done, err := v.Write(src.AsSlice()[:writeSz])
		if err != nil {
			return fmt.Errorf("could not write to view during append: %w", err)
		}
		src.TrimFront(done)
		b.size += int64(done)
		if src.Size() == 0 {
			src.Release()
			return nil
		}
	}

	// If there is still data left just append the src.
	b.appendOwned(src)
	return nil
}

func (b *Buffer) appendOwned(v *View) {
	b.data.PushBack(v)
	b.size += int64(v.Size())
}

func (b *Buffer) prependOwned(v *View) {
	b.data.PushFront(v)
	b.size += int64(v.Size())
}

// PullUp makes the specified range contiguous and returns the backing memory.
func (b *Buffer) PullUp(offset, length int) (View, bool) {
	if length == 0 {
		return View{}, true
	}
	tgt := Range{begin: offset, end: offset + length}
	if tgt.Intersect(Range{end: int(b.size)}).Len() != length {
		return View{}, false
	}

	curr := Range{}
	v := b.data.Front()
	for ; v != nil; v = v.Next() {
		origLen := v.Size()
		curr.end = curr.begin + origLen

		if x := curr.Intersect(tgt); x.Len() == tgt.Len() {
			// buf covers the whole requested target range.
			sub := x.Offset(-curr.begin)
			// Don't increment the reference count of the underlying chunk. Views
			// returned by PullUp are explicitly unowned and read only.
			new := View{
				read:  v.read + sub.begin,
				write: v.read + sub.end,
				chunk: v.chunk,
			}
			return new, true
		} else if x.Len() > 0 {
			// buf is pointing at the starting buffer we want to merge.
			break
		}

		curr.begin += origLen
	}

	// Calculate the total merged length.
	totLen := 0
	for n := v; n != nil; n = n.Next() {
		totLen += n.Size()
		if curr.begin+totLen >= tgt.end {
			break
		}
	}

	// Merge the buffers.
	merged := NewViewSize(totLen)
	off := 0
	for n := v; n != nil && off < totLen; {
		merged.WriteAt(n.AsSlice(), off)
		off += n.Size()

		// Remove buffers except for the first one, which will be reused.
		if n == v {
			n = n.Next()
		} else {
			old := n
			n = n.Next()
			b.removeView(old)
		}
	}
	// Make data the first buffer.
	b.data.InsertBefore(v, merged)
	b.removeView(v)

	r := tgt.Offset(-curr.begin)
	pulled := View{
		read:  r.begin,
		write: r.end,
		chunk: merged.chunk,
	}
	return pulled, true
}

// Flatten returns a flattened copy of this data.
//
// This method should not be used in any performance-sensitive paths. It may
// allocate a fresh byte slice sufficiently large to contain all the data in
// the buffer. This is principally for debugging.
//
// N.B. The data still belongs to this Buffer: if there is a single buffer
// present, it will be returned directly. This should be used for
// temporary use only, and a reference to the given slice should not be held.
func (b *Buffer) Flatten() []byte {
	if v := b.data.Front(); v == nil {
		return nil // No data at all.
	}
	data := make([]byte, 0, b.size) // Need to flatten.
	for v := b.data.Front(); v != nil; v = v.Next() {
		// Copy to the allocated slice.
		data = append(data, v.AsSlice()...)
	}
	return data
}

// Size indicates the total amount of data available in this Buffer.
func (b *Buffer) Size() int64 {
	return b.size
}

// AsViewList returns the ViewList backing b. Users may not save or modify the
// ViewList returned.
func (b *Buffer) AsViewList() ViewList {
	return b.data
}

// Clone creates a copy-on-write clone of b. The underlying chunks are shared
// until they are written to.
func (b *Buffer) Clone() Buffer {
	other := Buffer{
		size: b.size,
	}
	for v := b.data.Front(); v != nil; v = v.Next() {
		newView := v.Clone()
		other.data.PushBack(newView)
	}
	return other
}

// DeepClone creates a deep clone of b, copying data such that no bytes are
// shared with any other Buffers.
func (b *Buffer) DeepClone() Buffer {
	newBuf := Buffer{}
	buf := b.Clone()
	reader := buf.AsBufferReader()
	newBuf.WriteFromReader(&reader, b.size)
	return newBuf
}

// Apply applies the given function across all valid data.
func (b *Buffer) Apply(fn func(*View)) {
	for v := b.data.Front(); v != nil; v = v.Next() {
		d := v.Clone()
		fn(d)
		d.Release()
	}
}

// SubApply applies fn to a given range of data in b. Any part of the range
// outside of b is ignored.
func (b *Buffer) SubApply(offset, length int, fn func(*View)) {
	for v := b.data.Front(); length > 0 && v != nil; v = v.Next() {
		if offset >= v.Size() {
			offset -= v.Size()
			continue
		}
		d := v.Clone()
		if offset > 0 {
			d.TrimFront(offset)
			offset = 0
		}
		if length < d.Size() {
			d.write = d.read + length
		}
		fn(d)
		length -= d.Size()
		d.Release()
	}
}

// Checksum calculates a checksum over the buffer's payload starting at offset.
func (b *Buffer) Checksum(offset int) uint16 {
	if offset >= int(b.size) {
		return 0
	}
	var v *View
	for v = b.data.Front(); v != nil && offset >= v.Size(); v = v.Next() {
		offset -= v.Size()
	}

	var cs checksum.Checksumer
	cs.Add(v.AsSlice()[offset:])
	for v = v.Next(); v != nil; v = v.Next() {
		cs.Add(v.AsSlice())
	}
	return cs.Checksum()
}

// Merge merges the provided Buffer with this one.
//
// The other Buffer will be appended to b, and other will be empty after this
// operation completes.
func (b *Buffer) Merge(other *Buffer) {
	b.data.PushBackList(&other.data)
	other.data = ViewList{}

	// Adjust sizes.
	b.size += other.size
	other.size = 0
}

// WriteFromReader writes to the buffer from an io.Reader. A maximum read size
// of MaxChunkSize is enforced to prevent allocating views from the heap.
func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
	return b.WriteFromReaderAndLimitedReader(r, count, nil)
}

// WriteFromReaderAndLimitedReader is the same as WriteFromReader, but
// optimized to avoid allocations if a LimitedReader is passed in.
//
// This function clobbers the values of lr.
func (b *Buffer) WriteFromReaderAndLimitedReader(r io.Reader, count int64, lr *io.LimitedReader) (int64, error) {
	if lr == nil {
		lr = &io.LimitedReader{}
	}

	var done int64
	for done < count {
		vsize := count - done
		if vsize > MaxChunkSize {
			vsize = MaxChunkSize
		}
		v := NewView(int(vsize))
		lr.R = r
		lr.N = vsize
		n, err := io.Copy(v, lr)
		b.Append(v)
		done += n
		if err == io.EOF {
			break
		}
		if err != nil {
			return done, err
		}
	}
	return done, nil
}

// ReadToWriter reads from the buffer into an io.Writer.
//
// N.B. This does not consume the bytes read. TrimFront should
// be called appropriately after this call in order to do so.
func (b *Buffer) ReadToWriter(w io.Writer, count int64) (int64, error) {
	bytesLeft := int(count)
	for v := b.data.Front(); v != nil && bytesLeft > 0; v = v.Next() {
		view := v.Clone()
		if view.Size() > bytesLeft {
			view.CapLength(bytesLeft)
		}
		n, err := io.Copy(w, view)
		bytesLeft -= int(n)
		view.Release()
		if err != nil {
			return count - int64(bytesLeft), err
		}
	}
	return count - int64(bytesLeft), nil
}

// read implements the io.Reader interface. This method is used by BufferReader
// to consume its underlying buffer. To perform io operations on buffers
// directly, use ReadToWriter or WriteFromReader.
func (b *Buffer) read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if b.Size() == 0 {
		return 0, io.EOF
	}
	done := 0
	v := b.data.Front()
	for v != nil && done < len(p) {
		n, err := v.Read(p[done:])
		done += n
		next := v.Next()
		if v.Size() == 0 {
			b.removeView(v)
		}
		b.size -= int64(n)
		if err != nil && err != io.EOF {
			return done, err
		}
		v = next
	}
	return done, nil
}

// readByte implements the io.ByteReader interface. This method is used by
// BufferReader to consume its underlying buffer. To perform io operations on
// buffers directly, use ReadToWriter or WriteFromReader.
func (b *Buffer) readByte() (byte, error) {
	if b.Size() == 0 {
		return 0, io.EOF
	}
	v := b.data.Front()
	bt := v.AsSlice()[0]
	b.TrimFront(1)
	return bt, nil
}

// AsBufferReader returns the Buffer as a BufferReader capable of io methods.
// The new BufferReader takes ownership of b.
func (b *Buffer) AsBufferReader() BufferReader {
	return BufferReader{b}
}

// BufferReader implements io methods on Buffer. Users must call Close()
// when finished with the buffer to free the underlying memory.
type BufferReader struct {
	b *Buffer
}

// Read implements the io.Reader interface.
func (br *BufferReader) Read(p []byte) (int, error) {
	return br.b.read(p)
}

// ReadByte implements the io.ByteReader interface.
func (br *BufferReader) ReadByte() (byte, error) {
	return br.b.readByte()
}

// Close implements the io.Closer interface.
func (br *BufferReader) Close() {
	br.b.Release()
}

// Len returns the number of bytes in the unread portion of the buffer.
func (br *BufferReader) Len() int {
	return int(br.b.Size())
}

// Range specifies a range of buffer.
type Range struct {
	begin int
	end   int
}

// Intersect returns the intersection of x and y.
func (x Range) Intersect(y Range) Range {
	if x.begin < y.begin {
		x.begin = y.begin
	}
	if x.end > y.end {
		x.end = y.end
	}
	if x.begin >= x.end {
		return Range{}
	}
	return x
}

// Offset returns x offset by off.
func (x Range) Offset(off int) Range {
	x.begin += off
	x.end += off
	return x
}

// Len returns the length of x.
func (x Range) Len() int {
	l := x.end - x.begin
	if l < 0 {
		l = 0
	}
	return l
}
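For orientation, here is a minimal usage sketch of the API above. This example is illustrative and is not part of the vendored diff; it only uses functions defined in this file (MakeWithData, Append, NewViewWithData, Flatten, TrimFront, Clone, Release).

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/buffer"
)

func main() {
	// Build a non-contiguous buffer from two views; Append is O(1).
	b := buffer.MakeWithData([]byte("hello, "))
	b.Append(buffer.NewViewWithData([]byte("world")))
	fmt.Println(b.Size())            // 12
	fmt.Println(string(b.Flatten())) // "hello, world" (copies; debug use only)

	// TrimFront consumes bytes from the front, across view boundaries.
	b.TrimFront(7)
	fmt.Println(string(b.Flatten())) // "world"

	// Clone is copy-on-write: the underlying chunks stay shared until written.
	c := b.Clone()
	b.Release()
	fmt.Println(string(c.Flatten())) // "world" (chunks are reference counted)
	c.Release()
}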
vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go (vendored, normal file, 29 lines)
@@ -0,0 +1,29 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"context"
)

// saveData is invoked by stateify.
func (b *Buffer) saveData() []byte {
	return b.Flatten()
}

// loadData is invoked by stateify.
func (b *Buffer) loadData(_ context.Context, data []byte) {
	*b = MakeWithData(data)
}
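Note the effect of this pair: a saved Buffer is flattened to one contiguous []byte and rebuilt as a single-view Buffer on load, so copy-on-write sharing is not preserved across a save. A rough equivalent in terms of the public API (illustrative only; the real path goes through pkg/state):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/buffer"
)

// roundTrip mirrors what the saveData/loadData hooks above do.
func roundTrip(b *buffer.Buffer) buffer.Buffer {
	saved := b.Flatten()              // saveData: flatten to one contiguous []byte
	return buffer.MakeWithData(saved) // loadData: rebuild as a single-view Buffer
}

func main() {
	b := buffer.MakeWithData([]byte("payload"))
	restored := roundTrip(&b)
	fmt.Println(string(restored.Flatten())) // "payload"
	b.Release()
	restored.Release()
}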
vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go (vendored, normal file, 187 lines)
@@ -0,0 +1,187 @@
// automatically generated by stateify.

package buffer

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

func (b *Buffer) StateTypeName() string {
	return "pkg/buffer.Buffer"
}

func (b *Buffer) StateFields() []string {
	return []string{
		"data",
		"size",
	}
}

func (b *Buffer) beforeSave() {}

// +checklocksignore
func (b *Buffer) StateSave(stateSinkObject state.Sink) {
	b.beforeSave()
	var dataValue []byte
	dataValue = b.saveData()
	stateSinkObject.SaveValue(0, dataValue)
	stateSinkObject.Save(1, &b.size)
}

func (b *Buffer) afterLoad(context.Context) {}

// +checklocksignore
func (b *Buffer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(1, &b.size)
	stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(ctx, y.([]byte)) })
}

func (c *chunk) StateTypeName() string {
	return "pkg/buffer.chunk"
}

func (c *chunk) StateFields() []string {
	return []string{
		"chunkRefs",
		"data",
	}
}

func (c *chunk) beforeSave() {}

// +checklocksignore
func (c *chunk) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.chunkRefs)
	stateSinkObject.Save(1, &c.data)
}

func (c *chunk) afterLoad(context.Context) {}

// +checklocksignore
func (c *chunk) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.chunkRefs)
	stateSourceObject.Load(1, &c.data)
}

func (r *chunkRefs) StateTypeName() string {
	return "pkg/buffer.chunkRefs"
}

func (r *chunkRefs) StateFields() []string {
	return []string{
		"refCount",
	}
}

func (r *chunkRefs) beforeSave() {}

// +checklocksignore
func (r *chunkRefs) StateSave(stateSinkObject state.Sink) {
	r.beforeSave()
	stateSinkObject.Save(0, &r.refCount)
}

// +checklocksignore
func (r *chunkRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &r.refCount)
	stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
}

func (v *View) StateTypeName() string {
	return "pkg/buffer.View"
}

func (v *View) StateFields() []string {
	return []string{
		"read",
		"write",
		"chunk",
	}
}

func (v *View) beforeSave() {}

// +checklocksignore
func (v *View) StateSave(stateSinkObject state.Sink) {
	v.beforeSave()
	stateSinkObject.Save(0, &v.read)
	stateSinkObject.Save(1, &v.write)
	stateSinkObject.Save(2, &v.chunk)
}

func (v *View) afterLoad(context.Context) {}

// +checklocksignore
func (v *View) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &v.read)
	stateSourceObject.Load(1, &v.write)
	stateSourceObject.Load(2, &v.chunk)
}

func (l *ViewList) StateTypeName() string {
	return "pkg/buffer.ViewList"
}

func (l *ViewList) StateFields() []string {
	return []string{
		"head",
		"tail",
	}
}

func (l *ViewList) beforeSave() {}

// +checklocksignore
func (l *ViewList) StateSave(stateSinkObject state.Sink) {
	l.beforeSave()
	stateSinkObject.Save(0, &l.head)
	stateSinkObject.Save(1, &l.tail)
}

func (l *ViewList) afterLoad(context.Context) {}

// +checklocksignore
func (l *ViewList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &l.head)
	stateSourceObject.Load(1, &l.tail)
}

func (e *ViewEntry) StateTypeName() string {
	return "pkg/buffer.ViewEntry"
}

func (e *ViewEntry) StateFields() []string {
	return []string{
		"next",
		"prev",
	}
}

func (e *ViewEntry) beforeSave() {}

// +checklocksignore
func (e *ViewEntry) StateSave(stateSinkObject state.Sink) {
	e.beforeSave()
	stateSinkObject.Save(0, &e.next)
	stateSinkObject.Save(1, &e.prev)
}

func (e *ViewEntry) afterLoad(context.Context) {}

// +checklocksignore
func (e *ViewEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.next)
	stateSourceObject.Load(1, &e.prev)
}

func init() {
	state.Register((*Buffer)(nil))
	state.Register((*chunk)(nil))
	state.Register((*chunkRefs)(nil))
	state.Register((*View)(nil))
	state.Register((*ViewList)(nil))
	state.Register((*ViewEntry)(nil))
}
vendor/gvisor.dev/gvisor/pkg/buffer/buffer_unsafe_state_autogen.go (vendored, normal file, 3 lines)
@@ -0,0 +1,3 @@
// automatically generated by stateify.

package buffer
vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go (vendored, normal file, 113 lines)
@@ -0,0 +1,113 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/bits"
	"gvisor.dev/gvisor/pkg/sync"
)

const (
	// This is log2(baseChunkSize). This number is used to calculate which pool
	// to use for a payload size by right shifting the payload size by this
	// number and passing the result to MostSignificantOne64.
	baseChunkSizeLog2 = 6

	// This is the size of the buffers in the first pool. Each subsequent pool
	// creates payloads 2^(pool index) times larger than the first pool's
	// payloads.
	baseChunkSize = 1 << baseChunkSizeLog2 // 64

	// MaxChunkSize is the largest payload size that we pool. Payloads larger
	// than this will be allocated from the heap and garbage collected as normal.
	MaxChunkSize = baseChunkSize << (numPools - 1) // 64k

	// The number of chunk pools we have for use.
	numPools = 11
)

// chunkPools is a collection of pools for payloads of different sizes. The
// size of the payloads doubles in each successive pool.
var chunkPools [numPools]sync.Pool

func init() {
	for i := 0; i < numPools; i++ {
		chunkSize := baseChunkSize * (1 << i)
		chunkPools[i].New = func() any {
			return &chunk{
				data: make([]byte, chunkSize),
			}
		}
	}
}

// Precondition: 0 <= size <= MaxChunkSize
func getChunkPool(size int) *sync.Pool {
	idx := 0
	if size > baseChunkSize {
		idx = bits.MostSignificantOne64(uint64(size) >> baseChunkSizeLog2)
		if size > 1<<(idx+baseChunkSizeLog2) {
			idx++
		}
	}
	if idx >= numPools {
		panic(fmt.Sprintf("pool for chunk size %d does not exist", size))
	}
	return &chunkPools[idx]
}

// chunk represents a slice of pooled memory.
//
// +stateify savable
type chunk struct {
	chunkRefs
	data []byte
}

func newChunk(size int) *chunk {
	var c *chunk
	if size > MaxChunkSize {
		c = &chunk{
			data: make([]byte, size),
		}
	} else {
		pool := getChunkPool(size)
		c = pool.Get().(*chunk)
		clear(c.data)
	}
	c.InitRefs()
	return c
}

func (c *chunk) destroy() {
	if len(c.data) > MaxChunkSize {
		c.data = nil
		return
	}
	pool := getChunkPool(len(c.data))
	pool.Put(c)
}

func (c *chunk) DecRef() {
	c.chunkRefs.DecRef(c.destroy)
}

func (c *chunk) Clone() *chunk {
	cpy := newChunk(len(c.data))
	copy(cpy.data, c.data)
	return cpy
}
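In short, getChunkPool rounds a payload size up to the next power-of-two pool. A standalone sketch of the same index math, using the standard library's math/bits.Len64 in place of the package's bits.MostSignificantOne64 (the poolIndex helper is hypothetical and only mirrors the logic above):

package main

import (
	"fmt"
	"math/bits"
)

// poolIndex mirrors getChunkPool's selection: shift by baseChunkSizeLog2 (6),
// find the most significant set bit, and round up when size is not an exact
// power-of-two multiple of 64.
func poolIndex(size int) int {
	idx := 0
	if size > 64 {
		idx = bits.Len64(uint64(size)>>6) - 1
		if size > 1<<(idx+6) {
			idx++
		}
	}
	return idx
}

func main() {
	for _, size := range []int{1, 64, 65, 128, 4096, 65536} {
		fmt.Printf("size %6d -> pool %2d (chunk %d bytes)\n",
			size, poolIndex(size), 64<<poolIndex(size))
	}
	// size      1 -> pool  0 (chunk 64 bytes)
	// size     65 -> pool  1 (chunk 128 bytes)
	// size  65536 -> pool 10 (chunk 65536 bytes)
}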
vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go (vendored, normal file, 142 lines)
@@ -0,0 +1,142 @@
package buffer

import (
	"context"
	"fmt"

	"gvisor.dev/gvisor/pkg/atomicbitops"
	"gvisor.dev/gvisor/pkg/refs"
)

// enableLogging indicates whether reference-related events should be logged (with
// stack traces). This is false by default and should only be set to true for
// debugging purposes, as it can generate an extremely large amount of output
// and drastically degrade performance.
const chunkenableLogging = false

// obj is used to customize logging. Note that we use a pointer to T so that
// we do not copy the entire object when passed as a format parameter.
var chunkobj *chunk

// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
// NOTE: Do not introduce additional fields to the Refs struct. It is used by
// many filesystem objects, and we want to keep it as small as possible (i.e.,
// the same size as using an int64 directly) to avoid taking up extra cache
// space. In general, this template should not be extended at the cost of
// performance. If it does not offer enough flexibility for a particular object
// (example: b/187877947), we should implement the RefCounter/CheckedObject
// interfaces manually.
//
// +stateify savable
type chunkRefs struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a CompareAndSwap
	// loop. See IncRef, DecRef and TryIncRef for details of how these fields are
	// used.
	refCount atomicbitops.Int64
}

// InitRefs initializes r with one reference and, if enabled, activates leak
// checking.
func (r *chunkRefs) InitRefs() {
	r.refCount.RacyStore(1)
	refs.Register(r)
}

// RefType implements refs.CheckedObject.RefType.
func (r *chunkRefs) RefType() string {
	return fmt.Sprintf("%T", chunkobj)[1:]
}

// LeakMessage implements refs.CheckedObject.LeakMessage.
func (r *chunkRefs) LeakMessage() string {
	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
}

// LogRefs implements refs.CheckedObject.LogRefs.
func (r *chunkRefs) LogRefs() bool {
	return chunkenableLogging
}

// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *chunkRefs) ReadRefs() int64 {
	return r.refCount.Load()
}

// IncRef implements refs.RefCounter.IncRef.
//
//go:nosplit
func (r *chunkRefs) IncRef() {
	v := r.refCount.Add(1)
	if chunkenableLogging {
		refs.LogIncRef(r, v)
	}
	if v <= 1 {
		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
	}
}

// TryIncRef implements refs.TryRefCounter.TryIncRef.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to distinguish
// other TryIncRef calls from genuine references held.
//
//go:nosplit
func (r *chunkRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := r.refCount.Add(speculativeRef); int32(v) == 0 {
		r.refCount.Add(-speculativeRef)
		return false
	}

	v := r.refCount.Add(-speculativeRef + 1)
	if chunkenableLogging {
		refs.LogTryIncRef(r, v)
	}
	return true
}

// DecRef implements refs.RefCounter.DecRef.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
//	A: TryIncRef [speculative increase => sees non-negative references]
//	B: DecRef [real decrease]
//	A: TryIncRef [transform speculative to real]
//
//go:nosplit
func (r *chunkRefs) DecRef(destroy func()) {
	v := r.refCount.Add(-1)
	if chunkenableLogging {
		refs.LogDecRef(r, v)
	}
	switch {
	case v < 0:
		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))

	case v == 0:
		refs.Unregister(r)

		if destroy != nil {
			destroy()
		}
	}
}

func (r *chunkRefs) afterLoad(context.Context) {
	if r.ReadRefs() > 0 {
		refs.Register(r)
	}
}
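The packed counter above keeps speculative references in the high 32 bits and real references in the low 32 bits, so a single atomic add can both reserve a slot and test liveness. A self-contained sketch of the arithmetic (plain int64 here for illustration; the real code uses atomicbitops.Int64):

package main

import "fmt"

func main() {
	const speculativeRef = int64(1) << 32

	refCount := int64(1) // one real reference, as set by InitRefs

	// TryIncRef step 1: add a speculative reference. The low 32 bits still
	// hold the real count; a nonzero value there means the object is alive.
	refCount += speculativeRef
	fmt.Println(int32(refCount) != 0) // true: safe to proceed

	// TryIncRef step 2: convert the speculative reference into a real one.
	refCount += -speculativeRef + 1
	fmt.Println(refCount) // 2: two real references, no speculative ones

	// Had the real count been zero, int32(refCount) would have been 0 and
	// the speculative reference would simply be backed out.
}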
vendor/gvisor.dev/gvisor/pkg/buffer/view.go (vendored, normal file, 366 lines)
@@ -0,0 +1,366 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/sync"
)

// ReadSize is the default amount that a View's size is increased by when an
// io.Reader has more data than a View can hold during calls to ReadFrom.
const ReadSize = 512

var viewPool = sync.Pool{
	New: func() any {
		return &View{}
	},
}

// View is a window into a shared chunk. Views are held by Buffers in
// viewLists to represent contiguous memory.
//
// A View must be created with NewView, NewViewWithData, or Clone. Owners are
// responsible for maintaining ownership over their views. When Views need to be
// shared or copied, the owner should create a new View with Clone. Clone must
// only ever be called on an owned View, not a borrowed one.
//
// Users are responsible for calling Release when finished with their View so
// that its resources can be returned to the pool.
//
// Users must not write directly to slices returned by AsSlice. Instead, they
// must use Write/WriteAt/CopyIn to modify the underlying View. This preserves
// the safety guarantees of copy-on-write.
//
// +stateify savable
type View struct {
	ViewEntry `state:"nosave"`
	read      int
	write     int
	chunk     *chunk
}

// NewView creates a new view with capacity at least as big as cap. It is
// analogous to make([]byte, 0, cap).
func NewView(cap int) *View {
	c := newChunk(cap)
	v := viewPool.Get().(*View)
	*v = View{chunk: c}
	return v
}

// NewViewSize creates a new view with capacity at least as big as size and
// length that is exactly size. It is analogous to make([]byte, size).
func NewViewSize(size int) *View {
	v := NewView(size)
	v.Grow(size)
	return v
}

// NewViewWithData creates a new view and initializes it with data. This
// function should be used with caution to avoid unnecessary []byte allocations.
// When in doubt use MakeWithView to maximize chunk reuse in production
// environments.
func NewViewWithData(data []byte) *View {
	c := newChunk(len(data))
	v := viewPool.Get().(*View)
	*v = View{chunk: c}
	v.Write(data)
	return v
}

// Clone creates a shallow clone of v where the underlying chunk is shared.
//
// The caller must own the View to call Clone. It is not safe to call Clone
// on a borrowed or shared View because it can race with other View methods.
func (v *View) Clone() *View {
	if v == nil {
		panic("cannot clone a nil view")
	}
	v.chunk.IncRef()
	newV := viewPool.Get().(*View)
	newV.chunk = v.chunk
	newV.read = v.read
	newV.write = v.write
	return newV
}

// Release releases the chunk held by v and returns v to the pool.
func (v *View) Release() {
	if v == nil {
		panic("cannot release a nil view")
	}
	v.chunk.DecRef()
	*v = View{}
	viewPool.Put(v)
}

// Reset sets the view's read and write indices back to zero.
func (v *View) Reset() {
	if v == nil {
		panic("cannot reset a nil view")
	}
	v.read = 0
	v.write = 0
}

func (v *View) sharesChunk() bool {
	return v.chunk.refCount.Load() > 1
}

// Full indicates the chunk is full.
//
// This indicates there is no capacity left to write.
func (v *View) Full() bool {
	return v == nil || v.write == len(v.chunk.data)
}

// Capacity returns the total size of this view's chunk.
func (v *View) Capacity() int {
	if v == nil {
		return 0
	}
	return len(v.chunk.data)
}

// Size returns the size of data written to the view.
func (v *View) Size() int {
	if v == nil {
		return 0
	}
	return v.write - v.read
}

// TrimFront advances the read index by the given amount.
func (v *View) TrimFront(n int) {
	if v.read+n > v.write {
		panic("cannot trim past the end of a view")
	}
	v.read += n
}

// AsSlice returns a slice of the data written to this view.
func (v *View) AsSlice() []byte {
	if v.Size() == 0 {
		return nil
	}
	return v.chunk.data[v.read:v.write]
}

// ToSlice returns an owned copy of the data in this view.
func (v *View) ToSlice() []byte {
	if v.Size() == 0 {
		return nil
	}
	s := make([]byte, v.Size())
	copy(s, v.AsSlice())
	return s
}

// AvailableSize returns the number of bytes available for writing.
func (v *View) AvailableSize() int {
	if v == nil {
		return 0
	}
	return len(v.chunk.data) - v.write
}

// Read reads v's data into p.
//
// Implements the io.Reader interface.
func (v *View) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if v.Size() == 0 {
		return 0, io.EOF
	}
	n := copy(p, v.AsSlice())
	v.TrimFront(n)
	return n, nil
}

// ReadByte implements the io.ByteReader interface.
func (v *View) ReadByte() (byte, error) {
	if v.Size() == 0 {
		return 0, io.EOF
	}
	b := v.AsSlice()[0]
	v.read++
	return b, nil
}

// WriteTo writes data to w until the view is empty or an error occurs. The
// return value n is the number of bytes written.
//
// WriteTo implements the io.WriterTo interface.
func (v *View) WriteTo(w io.Writer) (n int64, err error) {
	if v.Size() > 0 {
		sz := v.Size()
		m, e := w.Write(v.AsSlice())
		v.TrimFront(m)
		n = int64(m)
		if e != nil {
			return n, e
		}
		if m != sz {
			return n, io.ErrShortWrite
		}
	}
	return n, nil
}

// ReadAt reads data into p starting at off.
//
// Implements the io.ReaderAt interface.
func (v *View) ReadAt(p []byte, off int) (int, error) {
	if off < 0 || off > v.Size() {
		return 0, fmt.Errorf("ReadAt(): offset out of bounds: want 0 < off < %d, got off=%d", v.Size(), off)
	}
	n := copy(p, v.AsSlice()[off:])
	return n, nil
}

// Write writes data to the view's chunk starting at the v.write index. If the
// view's chunk has a reference count greater than 1, the chunk is copied first
// and then written to.
//
// Implements the io.Writer interface.
func (v *View) Write(p []byte) (int, error) {
	if v == nil {
		panic("cannot write to a nil view")
	}
	if v.AvailableSize() < len(p) {
		v.growCap(len(p) - v.AvailableSize())
	} else if v.sharesChunk() {
		defer v.chunk.DecRef()
		v.chunk = v.chunk.Clone()
	}
	n := copy(v.chunk.data[v.write:], p)
	v.write += n
	if n < len(p) {
		return n, io.ErrShortWrite
	}
	return n, nil
}

// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned.
//
// ReadFrom implements the io.ReaderFrom interface.
func (v *View) ReadFrom(r io.Reader) (n int64, err error) {
	if v == nil {
		panic("cannot write to a nil view")
	}
	if v.sharesChunk() {
		defer v.chunk.DecRef()
		v.chunk = v.chunk.Clone()
	}
	for {
		// Check for EOF to avoid an unnecessary allocation.
		if _, e := r.Read(nil); e == io.EOF {
			return n, nil
		}
		if v.AvailableSize() == 0 {
			v.growCap(ReadSize)
		}
		m, e := r.Read(v.availableSlice())
		v.write += m
		n += int64(m)

		if e == io.EOF {
			return n, nil
		}
		if e != nil {
			return n, e
		}
	}
}

// WriteAt writes data to the view's chunk starting at off. If the
// view's chunk has a reference count greater than 1, the chunk is copied first
// and then written to.
//
// Implements the io.WriterAt interface.
func (v *View) WriteAt(p []byte, off int) (int, error) {
	if v == nil {
		panic("cannot write to a nil view")
	}
	if off < 0 || off > v.Size() {
		return 0, fmt.Errorf("write offset out of bounds: want 0 < off < %d, got off=%d", v.Size(), off)
	}
	if v.sharesChunk() {
		defer v.chunk.DecRef()
		v.chunk = v.chunk.Clone()
	}
	n := copy(v.AsSlice()[off:], p)
	if n < len(p) {
		return n, io.ErrShortWrite
	}
	return n, nil
}

// Grow increases the size of the view. If the new size is greater than the
// view's current capacity, Grow will reallocate the view with an increased
// capacity.
func (v *View) Grow(n int) {
	if v == nil {
		panic("cannot grow a nil view")
	}
	if v.write+n > v.Capacity() {
		v.growCap(n)
	}
	v.write += n
}

// growCap increases the capacity of the view by at least n.
func (v *View) growCap(n int) {
	if v == nil {
		panic("cannot grow a nil view")
	}
	defer v.chunk.DecRef()
	old := v.AsSlice()
	v.chunk = newChunk(v.Capacity() + n)
	copy(v.chunk.data, old)
	v.read = 0
	v.write = len(old)
}

// CapLength caps the length of the view's read slice to n. If n > v.Size(),
// the function is a no-op.
func (v *View) CapLength(n int) {
	if v == nil {
		panic("cannot resize a nil view")
	}
	if n < 0 {
		panic("n must be >= 0")
	}
	if n > v.Size() {
		n = v.Size()
	}
	v.write = v.read + n
}

func (v *View) availableSlice() []byte {
	if v.sharesChunk() {
		defer v.chunk.DecRef()
		c := v.chunk.Clone()
		v.chunk = c
	}
	return v.chunk.data[v.write:]
}
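The copy-on-write contract above can be observed end to end: Clone shares the chunk, and the first Write through either view copies it. An illustrative sketch, not part of the vendored diff, using only methods defined in this file:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/buffer"
)

func main() {
	v := buffer.NewViewWithData([]byte("abc"))
	c := v.Clone() // chunk refcount is now 2; no bytes copied yet

	// Write sees sharesChunk() == true and clones the chunk before writing,
	// so the clone keeps observing the original bytes.
	v.Write([]byte("d"))
	fmt.Println(string(v.AsSlice())) // "abcd"
	fmt.Println(string(c.AsSlice())) // "abc" (the clone is unaffected)

	v.Release()
	c.Release()
}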
vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go (vendored, normal file, 239 lines)
@@ -0,0 +1,239 @@
package buffer

// ElementMapper provides an identity mapping by default.
//
// This can be replaced to provide a struct that maps elements to linker
// objects, if they are not the same. An ElementMapper is not typically
// required if: Linker is left as is, Element is left as is, or Linker and
// Element are the same type.
type ViewElementMapper struct{}

// linkerFor maps an Element to a Linker.
//
// This default implementation should be inlined.
//
//go:nosplit
func (ViewElementMapper) linkerFor(elem *View) *View { return elem }

// List is an intrusive list. Entries can be added to or removed from the list
// in O(1) time and with no additional memory allocations.
//
// The zero value for List is an empty list ready to use.
//
// To iterate over a list (where l is a List):
//
//	for e := l.Front(); e != nil; e = e.Next() {
//		// do something with e.
//	}
//
// +stateify savable
type ViewList struct {
	head *View
	tail *View
}

// Reset resets list l to the empty state.
func (l *ViewList) Reset() {
	l.head = nil
	l.tail = nil
}

// Empty returns true iff the list is empty.
//
//go:nosplit
func (l *ViewList) Empty() bool {
	return l.head == nil
}

// Front returns the first element of list l or nil.
//
//go:nosplit
func (l *ViewList) Front() *View {
	return l.head
}

// Back returns the last element of list l or nil.
//
//go:nosplit
func (l *ViewList) Back() *View {
	return l.tail
}

// Len returns the number of elements in the list.
//
// NOTE: This is an O(n) operation.
//
//go:nosplit
func (l *ViewList) Len() (count int) {
	for e := l.Front(); e != nil; e = (ViewElementMapper{}.linkerFor(e)).Next() {
		count++
	}
	return count
}

// PushFront inserts the element e at the front of list l.
//
//go:nosplit
func (l *ViewList) PushFront(e *View) {
	linker := ViewElementMapper{}.linkerFor(e)
	linker.SetNext(l.head)
	linker.SetPrev(nil)
	if l.head != nil {
		ViewElementMapper{}.linkerFor(l.head).SetPrev(e)
	} else {
		l.tail = e
	}

	l.head = e
}

// PushFrontList inserts list m at the start of list l, emptying m.
//
//go:nosplit
func (l *ViewList) PushFrontList(m *ViewList) {
	if l.head == nil {
		l.head = m.head
		l.tail = m.tail
	} else if m.head != nil {
		ViewElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
		ViewElementMapper{}.linkerFor(m.tail).SetNext(l.head)

		l.head = m.head
	}
	m.head = nil
	m.tail = nil
}

// PushBack inserts the element e at the back of list l.
//
//go:nosplit
func (l *ViewList) PushBack(e *View) {
	linker := ViewElementMapper{}.linkerFor(e)
	linker.SetNext(nil)
	linker.SetPrev(l.tail)
	if l.tail != nil {
		ViewElementMapper{}.linkerFor(l.tail).SetNext(e)
	} else {
		l.head = e
	}

	l.tail = e
}

// PushBackList inserts list m at the end of list l, emptying m.
//
//go:nosplit
func (l *ViewList) PushBackList(m *ViewList) {
	if l.head == nil {
		l.head = m.head
		l.tail = m.tail
	} else if m.head != nil {
		ViewElementMapper{}.linkerFor(l.tail).SetNext(m.head)
		ViewElementMapper{}.linkerFor(m.head).SetPrev(l.tail)

		l.tail = m.tail
	}
	m.head = nil
	m.tail = nil
}

// InsertAfter inserts e after b.
//
//go:nosplit
func (l *ViewList) InsertAfter(b, e *View) {
	bLinker := ViewElementMapper{}.linkerFor(b)
	eLinker := ViewElementMapper{}.linkerFor(e)

	a := bLinker.Next()

	eLinker.SetNext(a)
	eLinker.SetPrev(b)
	bLinker.SetNext(e)

	if a != nil {
		ViewElementMapper{}.linkerFor(a).SetPrev(e)
	} else {
		l.tail = e
	}
}

// InsertBefore inserts e before a.
//
//go:nosplit
func (l *ViewList) InsertBefore(a, e *View) {
	aLinker := ViewElementMapper{}.linkerFor(a)
	eLinker := ViewElementMapper{}.linkerFor(e)

	b := aLinker.Prev()
	eLinker.SetNext(a)
	eLinker.SetPrev(b)
	aLinker.SetPrev(e)

	if b != nil {
		ViewElementMapper{}.linkerFor(b).SetNext(e)
	} else {
		l.head = e
	}
}

// Remove removes e from l.
//
//go:nosplit
func (l *ViewList) Remove(e *View) {
	linker := ViewElementMapper{}.linkerFor(e)
	prev := linker.Prev()
	next := linker.Next()

	if prev != nil {
		ViewElementMapper{}.linkerFor(prev).SetNext(next)
	} else if l.head == e {
		l.head = next
	}

	if next != nil {
		ViewElementMapper{}.linkerFor(next).SetPrev(prev)
	} else if l.tail == e {
		l.tail = prev
	}

	linker.SetNext(nil)
	linker.SetPrev(nil)
}

// Entry is a default implementation of Linker. Users can add anonymous fields
// of this type to their structs to make them automatically implement the
// methods needed by List.
//
// +stateify savable
type ViewEntry struct {
	next *View
	prev *View
}

// Next returns the entry that follows e in the list.
//
//go:nosplit
func (e *ViewEntry) Next() *View {
	return e.next
}

// Prev returns the entry that precedes e in the list.
//
//go:nosplit
func (e *ViewEntry) Prev() *View {
	return e.prev
}

// SetNext assigns 'entry' as the entry that follows e in the list.
//
//go:nosplit
func (e *ViewEntry) SetNext(elem *View) {
	e.next = elem
}

// SetPrev assigns 'entry' as the entry that precedes e in the list.
//
//go:nosplit
func (e *ViewEntry) SetPrev(elem *View) {
	e.prev = elem
}
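ViewList is the generated intrusive-list template instantiated for *View: because View embeds ViewEntry, linking a view costs no extra allocation. A short usage sketch (illustrative, not part of the vendored diff):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/buffer"
)

func main() {
	var l buffer.ViewList // the zero value is an empty, ready-to-use list
	l.PushBack(buffer.NewViewWithData([]byte("a")))
	l.PushBack(buffer.NewViewWithData([]byte("b")))
	l.PushFront(buffer.NewViewWithData([]byte("c")))

	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Print(string(e.AsSlice())) // cab
	}
	fmt.Println(" len:", l.Len()) // len: 3 (O(n) walk)

	// Views are pooled; release them once removed, mirroring Buffer.Release.
	for e := l.Front(); e != nil; e = l.Front() {
		l.Remove(e)
		e.Release()
	}
}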
vendor/gvisor.dev/gvisor/pkg/buffer/view_unsafe.go (vendored, normal file, 26 lines)
@@ -0,0 +1,26 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"reflect"
	"unsafe"
)

// BasePtr returns a pointer to the view's chunk.
func (v *View) BasePtr() *byte {
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&v.chunk.data))
	return (*byte)(unsafe.Pointer(hdr.Data))
}