Update dependencies

This commit is contained in:
bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

41
vendor/github.com/u-root/uio/uio/alignreader.go generated vendored Normal file

@@ -0,0 +1,41 @@
// Copyright 2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"io"
)
// AlignReader keeps track of how many bytes were read so the reader can be
// aligned at a future time.
type AlignReader struct {
R io.Reader
N int
}
// Read reads from the underlying io.Reader.
func (r *AlignReader) Read(b []byte) (int, error) {
n, err := r.R.Read(b)
r.N += n
return n, err
}
// ReadByte reads one byte from the underlying io.Reader.
func (r *AlignReader) ReadByte() (byte, error) {
b := make([]byte, 1)
_, err := io.ReadFull(r, b)
return b[0], err
}
// Align aligns the reader to the given number of bytes and returns the
// bytes read to pad it.
func (r *AlignReader) Align(n int) ([]byte, error) {
if r.N%n == 0 {
return []byte{}, nil
}
pad := make([]byte, n-r.N%n)
m, err := io.ReadFull(r, pad)
return pad[:m], err
}
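
A minimal usage sketch of AlignReader (not part of the vendored file; the input bytes and the 4-byte alignment are illustrative): read a 6-byte field, then Align(4) to skip padding up to the next 4-byte boundary.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/u-root/uio/uio"
)

func main() {
	// 6 payload bytes, 2 pad bytes, then the next aligned field.
	src := bytes.NewReader([]byte("header\x00\x00next"))
	r := &uio.AlignReader{R: src}

	field := make([]byte, 6)
	if _, err := io.ReadFull(r, field); err != nil {
		panic(err)
	}
	// r.N is 6; Align(4) reads 2 pad bytes to reach offset 8.
	pad, err := r.Align(4)
	fmt.Println(r.N, len(pad), err) // 8 2 <nil>
}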

34
vendor/github.com/u-root/uio/uio/alignwriter.go generated vendored Normal file

@@ -0,0 +1,34 @@
// Copyright 2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"bytes"
"io"
)
// AlignWriter keeps track of how many bytes were written so the writer can be
// aligned at a future time.
type AlignWriter struct {
W io.Writer
N int
}
// Write writes to the underlying io.Writer.
func (w *AlignWriter) Write(b []byte) (int, error) {
n, err := w.W.Write(b)
w.N += n
return n, err
}
// Align aligns the writer to the given number of bytes using the given pad
// value.
func (w *AlignWriter) Align(n int, pad byte) error {
if w.N%n == 0 {
return nil
}
_, err := w.Write(bytes.Repeat([]byte{pad}, n-w.N%n))
return err
}
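
A matching sketch for AlignWriter, again with illustrative values: write three bytes, then pad to a 4-byte boundary with zero bytes.

package main

import (
	"bytes"
	"fmt"

	"github.com/u-root/uio/uio"
)

func main() {
	var buf bytes.Buffer
	w := &uio.AlignWriter{W: &buf}

	_, _ = w.Write([]byte("abc")) // w.N == 3
	_ = w.Align(4, 0x00)          // writes one 0x00 pad byte
	fmt.Println(w.N, buf.Len())   // 4 4
}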

85
vendor/github.com/u-root/uio/uio/archivereader.go generated vendored Normal file

@@ -0,0 +1,85 @@
// Copyright 2021 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"bytes"
"errors"
"io"
"github.com/pierrec/lz4/v4"
)
const (
// defaultArchivePreReadSizeBytes is the number of bytes pre-read from an
// io.Reader; the bytes are matched against known archive headers.
defaultArchivePreReadSizeBytes = 1024
)
// ErrPreReadError is returned when the source could not be pre-read at all.
var ErrPreReadError = errors.New("pre-read nothing")
// ArchiveReader reads from an io.Reader and transparently decompresses
// source bytes when applicable.
//
// It allows probing for multiple archive formats, while still being able to
// read from the beginning, by pre-reading a small number of bytes.
//
// Always use newArchiveReader to initialize.
type ArchiveReader struct {
// src is where we read source bytes.
src io.Reader
// buf stores pre-read bytes from original io.Reader. Archive format
// detection will be done against it.
buf []byte
// preReadSizeBytes is how many bytes we pre-read for magic number
// matching for each archive type. This should be greater than or
// equal to the largest header frame size of each supported archive
// format.
preReadSizeBytes int
}
// NewArchiveReader pre-reads from r, probes for supported archive formats,
// and returns a reader that transparently decompresses on a match.
func NewArchiveReader(r io.Reader) (ArchiveReader, error) {
ar := ArchiveReader{
src: r,
// Randomly chosen, should be enough for most types:
//
// e.g. gzip with 10 byte header, lz4 with a header size
// between 7 and 19 bytes.
preReadSizeBytes: defaultArchivePreReadSizeBytes,
}
pbuf := make([]byte, ar.preReadSizeBytes)
nr, err := io.ReadFull(r, pbuf)
// The source may be smaller than the pre-read block size (1 KiB for now).
// That is unlikely when compression is in play, but handle it anyway.
ar.buf = pbuf[:nr]
if err == io.EOF {
// If we could not pre-read anything, we can't determine if
// it is a compressed file.
ar.src = io.MultiReader(bytes.NewReader(pbuf[:nr]), r)
return ar, ErrPreReadError
}
// Try each supported compression type, return upon first match.
// Try lz4.
// A magic-number error is reported if the source is not an lz4 archive,
// e.g. "lz4: bad magic number".
if ok, err := lz4.ValidFrameHeader(ar.buf); err == nil && ok {
ar.src = lz4.NewReader(io.MultiReader(bytes.NewReader(ar.buf), r))
return ar, nil
}
// Try other archive types here, gzip, xz, etc when needed.
// Last resort, read as is.
ar.src = io.MultiReader(bytes.NewReader(ar.buf), r)
return ar, nil
}
func (ar ArchiveReader) Read(p []byte) (n int, err error) {
return ar.src.Read(p)
}
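
A sketch of how NewArchiveReader might be used; the file name is hypothetical, and since only lz4 is currently detected, any other input is passed through as-is.

package main

import (
	"errors"
	"io"
	"log"
	"os"

	"github.com/u-root/uio/uio"
)

func main() {
	f, err := os.Open("image.bin") // hypothetical input, lz4 or raw
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	ar, err := uio.NewArchiveReader(f)
	// ErrPreReadError still yields a usable (raw) reader.
	if err != nil && !errors.Is(err, uio.ErrPreReadError) {
		log.Fatal(err)
	}
	n, err := io.Copy(io.Discard, ar)
	log.Printf("read %d bytes (decompressed if lz4): %v", n, err)
}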

382
vendor/github.com/u-root/uio/uio/buffer.go generated vendored Normal file

@@ -0,0 +1,382 @@
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"encoding/binary"
"errors"
"fmt"
"github.com/josharian/native"
)
// Marshaler is the interface implemented by an object that can marshal itself
// into binary form.
//
// Marshal appends data to the Lexer l.
type Marshaler interface {
Marshal(l *Lexer)
}
// Unmarshaler is the interface implemented by an object that can unmarshal a
// binary representation of itself.
//
// Unmarshal consumes data from the Lexer l.
type Unmarshaler interface {
Unmarshal(l *Lexer) error
}
// ToBytes marshals m in the given byte order.
func ToBytes(m Marshaler, order binary.ByteOrder) []byte {
l := NewLexer(NewBuffer(nil), order)
m.Marshal(l)
return l.Data()
}
// FromBytes unmarshals b into obj in the given byte order.
func FromBytes(obj Unmarshaler, b []byte, order binary.ByteOrder) error {
l := NewLexer(NewBuffer(b), order)
return obj.Unmarshal(l)
}
// ToBigEndian marshals m to big endian byte order.
func ToBigEndian(m Marshaler) []byte {
l := NewBigEndianBuffer(nil)
m.Marshal(l)
return l.Data()
}
// FromBigEndian unmarshals b into obj in big endian byte order.
func FromBigEndian(obj Unmarshaler, b []byte) error {
l := NewBigEndianBuffer(b)
return obj.Unmarshal(l)
}
// ToLittleEndian marshals m to little endian byte order.
func ToLittleEndian(m Marshaler) []byte {
l := NewLittleEndianBuffer(nil)
m.Marshal(l)
return l.Data()
}
// FromLittleEndian unmarshals b into obj in little endian byte order.
func FromLittleEndian(obj Unmarshaler, b []byte) error {
l := NewLittleEndianBuffer(b)
return obj.Unmarshal(l)
}
// Buffer implements functions to manipulate byte slices in a zero-copy way.
type Buffer struct {
// data is the underlying data.
data []byte
// byteCount keeps track of how many bytes have been consumed for
// debugging.
byteCount int
}
// NewBuffer returns a Buffer that consumes b for marshaling or unmarshaling.
func NewBuffer(b []byte) *Buffer {
return &Buffer{data: b}
}
// Preallocate increases the capacity of the buffer by n bytes.
func (b *Buffer) Preallocate(n int) {
b.data = append(b.data, make([]byte, 0, n)...)
}
// WriteN appends n bytes to the Buffer and returns a slice pointing to the
// newly appended bytes.
func (b *Buffer) WriteN(n int) []byte {
b.data = append(b.data, make([]byte, n)...)
return b.data[len(b.data)-n:]
}
// ErrBufferTooShort is returned when a caller wants to read more bytes than
// are available in the buffer.
var ErrBufferTooShort = errors.New("buffer too short")
// ReadN consumes n bytes from the Buffer. It returns an error wrapping
// ErrBufferTooShort if there aren't enough bytes left.
func (b *Buffer) ReadN(n int) ([]byte, error) {
if !b.Has(n) {
return nil, fmt.Errorf("%w at position %d: have %d bytes, want %d bytes", ErrBufferTooShort, b.byteCount, b.Len(), n)
}
rval := b.data[:n]
b.data = b.data[n:]
b.byteCount += n
return rval, nil
}
// Data returns the unconsumed data remaining in the Buffer.
func (b *Buffer) Data() []byte {
return b.data
}
// Has returns true if n bytes are available.
func (b *Buffer) Has(n int) bool {
return len(b.data) >= n
}
// Len returns the length of the remaining bytes.
func (b *Buffer) Len() int {
return len(b.data)
}
// Cap returns the available capacity.
func (b *Buffer) Cap() int {
return cap(b.data)
}
// Lexer is a convenient encoder/decoder for buffers.
//
// Use:
//
// func (s *something) Unmarshal(l *Lexer) {
// s.Foo = l.Read8()
// s.Bar = l.Read8()
// s.Baz = l.Read16()
// return l.Error()
// }
type Lexer struct {
*Buffer
// order is the byte order to write in / read in.
order binary.ByteOrder
// err is the first error encountered while reading or writing, if any.
err error
}
// NewLexer returns a new coder for buffers.
func NewLexer(b *Buffer, order binary.ByteOrder) *Lexer {
return &Lexer{
Buffer: b,
order: order,
}
}
// NewLittleEndianBuffer returns a new little endian coder for a new buffer.
func NewLittleEndianBuffer(b []byte) *Lexer {
return &Lexer{
Buffer: NewBuffer(b),
order: binary.LittleEndian,
}
}
// NewBigEndianBuffer returns a new big endian coder for a new buffer.
func NewBigEndianBuffer(b []byte) *Lexer {
return &Lexer{
Buffer: NewBuffer(b),
order: binary.BigEndian,
}
}
// NewNativeEndianBuffer returns a new native endian coder for a new buffer.
func NewNativeEndianBuffer(b []byte) *Lexer {
return &Lexer{
Buffer: NewBuffer(b),
order: native.Endian,
}
}
// SetError sets the error if no error has previously been set.
//
// The error can later be retrieved with the Error or FinError methods.
func (l *Lexer) SetError(err error) {
if l.err == nil {
l.err = err
}
}
// Consume returns a slice of the next n bytes from the buffer.
//
// Consume gives direct access to the underlying data.
func (l *Lexer) Consume(n int) []byte {
v, err := l.Buffer.ReadN(n)
if err != nil {
l.SetError(err)
return nil
}
return v
}
func (l *Lexer) append(n int) []byte {
return l.Buffer.WriteN(n)
}
// Error returns an error if an error occurred reading from the buffer.
func (l *Lexer) Error() error {
return l.err
}
// ErrUnreadBytes is returned when there is more data left to read in the buffer.
var ErrUnreadBytes = errors.New("buffer contains unread bytes")
// FinError returns an error if an error occurred or if there is more data left
// to read in the buffer.
func (l *Lexer) FinError() error {
if l.err != nil {
return l.err
}
if l.Buffer.Len() > 0 {
return ErrUnreadBytes
}
return nil
}
// Read8 reads a byte from the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Read8() uint8 {
v := l.Consume(1)
if v == nil {
return 0
}
return v[0]
}
// Read16 reads a 16-bit value from the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Read16() uint16 {
v := l.Consume(2)
if v == nil {
return 0
}
return l.order.Uint16(v)
}
// Read32 reads a 32-bit value from the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Read32() uint32 {
v := l.Consume(4)
if v == nil {
return 0
}
return l.order.Uint32(v)
}
// Read64 reads a 64-bit value from the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Read64() uint64 {
v := l.Consume(8)
if v == nil {
return 0
}
return l.order.Uint64(v)
}
// CopyN returns a copy of the next n bytes.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) CopyN(n int) []byte {
v := l.Consume(n)
if v == nil {
return nil
}
p := make([]byte, n)
m := copy(p, v)
return p[:m]
}
// ReadAll consumes and returns a copy of all remaining bytes in the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) ReadAll() []byte {
return l.CopyN(l.Len())
}
// ReadBytes reads exactly len(p) bytes from the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) ReadBytes(p []byte) {
copy(p, l.Consume(len(p)))
}
// Read implements io.Reader.Read.
func (l *Lexer) Read(p []byte) (int, error) {
v := l.Consume(len(p))
if v == nil {
return 0, l.Error()
}
return copy(p, v), nil
}
// ReadData reads the binary representation of data from the buffer.
//
// See binary.Read.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) ReadData(data interface{}) {
l.SetError(binary.Read(l, l.order, data))
}
// WriteData writes a binary representation of data to the buffer.
//
// See binary.Write.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) WriteData(data interface{}) {
l.SetError(binary.Write(l, l.order, data))
}
// Write8 writes a byte to the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Write8(v uint8) {
l.append(1)[0] = v
}
// Write16 writes a 16-bit value to the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Write16(v uint16) {
l.order.PutUint16(l.append(2), v)
}
// Write32 writes a 32-bit value to the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Write32(v uint32) {
l.order.PutUint32(l.append(4), v)
}
// Write64 writes a 64-bit value to the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Write64(v uint64) {
l.order.PutUint64(l.append(8), v)
}
// Append returns a newly appended n-byte slice of the Buffer to write to.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Append(n int) []byte {
return l.append(n)
}
// WriteBytes writes p to the Buffer.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) WriteBytes(p []byte) {
copy(l.append(len(p)), p)
}
// Write implements io.Writer.Write.
//
// If an error occurred, Error() will return a non-nil error.
func (l *Lexer) Write(p []byte) (int, error) {
return copy(l.append(len(p)), p), nil
}
// Align appends bytes to align the length of the buffer to be divisible by
// n. (The bit-mask arithmetic assumes n is a power of two.)
func (l *Lexer) Align(n int) {
pad := ((l.Len() + n - 1) &^ (n - 1)) - l.Len()
l.Append(pad)
}
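
To make the Buffer/Lexer pattern concrete, here is a hedged round-trip sketch; the hdr type and its layout are invented for illustration, built on the ToBigEndian/FromBigEndian helpers above.

package main

import (
	"fmt"
	"log"

	"github.com/u-root/uio/uio"
)

// hdr is a hypothetical wire structure: a 1-byte type and a 16-bit length.
type hdr struct {
	Typ uint8
	Len uint16
}

func (h *hdr) Marshal(l *uio.Lexer) {
	l.Write8(h.Typ)
	l.Write16(h.Len)
}

func (h *hdr) Unmarshal(l *uio.Lexer) error {
	h.Typ = l.Read8()
	h.Len = l.Read16()
	return l.FinError() // fails on short input or trailing bytes
}

func main() {
	b := uio.ToBigEndian(&hdr{Typ: 1, Len: 0x0203})
	fmt.Printf("% x\n", b) // 01 02 03

	var out hdr
	if err := uio.FromBigEndian(&out, b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {Typ:1 Len:515}
}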

98
vendor/github.com/u-root/uio/uio/cached.go generated vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"bytes"
"io"
)
// CachingReader is a lazily caching wrapper of an io.Reader.
//
// The wrapped io.Reader is only read from on demand, not upfront.
type CachingReader struct {
buf bytes.Buffer
r io.Reader
pos int
eof bool
}
// NewCachingReader buffers reads from r.
//
// r is only read from when Read() is called.
func NewCachingReader(r io.Reader) *CachingReader {
return &CachingReader{
r: r,
}
}
func (cr *CachingReader) read(p []byte) (int, error) {
n, err := cr.r.Read(p)
cr.buf.Write(p[:n])
if err == io.EOF || (n == 0 && err == nil) {
cr.eof = true
return n, io.EOF
}
return n, err
}
// NewReader returns a new io.Reader that reads cr from offset 0.
func (cr *CachingReader) NewReader() io.Reader {
return Reader(cr)
}
// Read reads from cr; implementing io.Reader.
//
// TODO(chrisko): Decide whether to keep this or only keep NewReader().
func (cr *CachingReader) Read(p []byte) (int, error) {
n, err := cr.ReadAt(p, int64(cr.pos))
cr.pos += n
return n, err
}
// ReadAt reads from cr; implementing io.ReaderAt.
func (cr *CachingReader) ReadAt(p []byte, off int64) (int, error) {
if len(p) == 0 {
return 0, nil
}
end := int(off) + len(p)
// Is the caller asking for some uncached bytes?
unread := end - cr.buf.Len()
if unread > 0 {
// Avoiding allocations: use `p` to read more bytes.
for unread > 0 {
toRead := unread % len(p)
if toRead == 0 {
toRead = len(p)
}
m, err := cr.read(p[:toRead])
unread -= m
if err == io.EOF {
break
}
if err != nil {
return 0, err
}
}
}
// If this is true, the entire file was read just to find out, but the
// offset is beyond the end of the file.
if off > int64(cr.buf.Len()) {
return 0, io.EOF
}
var err error
// Did the caller ask for more than was available?
//
// Note that any io.ReaderAt implementation *must* return an error for
// short reads.
if cr.eof && unread > 0 {
err = io.EOF
}
return copy(p, cr.buf.Bytes()[off:]), err
}
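
A brief sketch of the caching behavior: the wrapped source can only be consumed once, but every reader obtained from NewReader starts at offset 0 and is served from the cache.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/u-root/uio/uio"
)

func main() {
	cr := uio.NewCachingReader(strings.NewReader("one-shot stream"))

	// Both readers see the full stream; the source is read on demand,
	// once, and cached.
	a, _ := io.ReadAll(cr.NewReader())
	b, _ := io.ReadAll(cr.NewReader())
	fmt.Println(string(a), string(a) == string(b)) // one-shot stream true
}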

165
vendor/github.com/u-root/uio/uio/lazy.go generated vendored Normal file

@@ -0,0 +1,165 @@
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"fmt"
"io"
"os"
)
// ReadOneByte reads one byte from the given io.ReaderAt.
func ReadOneByte(r io.ReaderAt) error {
buf := make([]byte, 1)
n, err := r.ReadAt(buf, 0)
if err != nil {
return err
}
if n != 1 {
return fmt.Errorf("expected to read 1 byte, but got %d", n)
}
return nil
}
// LazyOpener is a lazy io.Reader.
//
// LazyOpener will use a given open function to derive an io.Reader when Read
// is first called on the LazyOpener.
type LazyOpener struct {
r io.Reader
s string
err error
open func() (io.Reader, error)
}
// NewLazyOpener returns a lazy io.Reader based on `open`.
func NewLazyOpener(filename string, open func() (io.Reader, error)) *LazyOpener {
if len(filename) == 0 {
return nil
}
return &LazyOpener{s: filename, open: open}
}
// Read implements io.Reader.Read lazily.
//
// If called for the first time, the underlying reader will be obtained and
// then used for the first and subsequent calls to Read.
func (lr *LazyOpener) Read(p []byte) (int, error) {
if lr.r == nil && lr.err == nil {
lr.r, lr.err = lr.open()
}
if lr.err != nil {
return 0, lr.err
}
return lr.r.Read(p)
}
// String implements fmt.Stringer.
func (lr *LazyOpener) String() string {
if len(lr.s) > 0 {
return lr.s
}
if lr.r != nil {
return fmt.Sprintf("%v", lr.r)
}
return "unopened mystery file"
}
// Close implements io.Closer.Close.
func (lr *LazyOpener) Close() error {
if c, ok := lr.r.(io.Closer); ok {
return c.Close()
}
return nil
}
// LazyOpenerAt is a lazy io.ReaderAt.
//
// LazyOpenerAt will use a given open function to derive an io.ReaderAt when
// ReadAt is first called.
type LazyOpenerAt struct {
r io.ReaderAt
s string
err error
limit int64
open func() (io.ReaderAt, error)
}
// NewLazyFile returns a lazy ReaderAt opened from path.
func NewLazyFile(path string) *LazyOpenerAt {
if len(path) == 0 {
return nil
}
return NewLazyOpenerAt(path, func() (io.ReaderAt, error) {
return os.Open(path)
})
}
// NewLazyLimitFile returns a lazy ReaderAt opened from path that reads at
// most limit bytes.
func NewLazyLimitFile(path string, limit int64) *LazyOpenerAt {
if len(path) == 0 {
return nil
}
return NewLazyLimitOpenerAt(path, limit, func() (io.ReaderAt, error) {
return os.Open(path)
})
}
// NewLazyOpenerAt returns a lazy io.ReaderAt based on `open`.
func NewLazyOpenerAt(filename string, open func() (io.ReaderAt, error)) *LazyOpenerAt {
return &LazyOpenerAt{s: filename, open: open, limit: -1}
}
// NewLazyLimitOpenerAt returns a lazy io.ReaderAt based on `open` that reads
// at most limit bytes.
func NewLazyLimitOpenerAt(filename string, limit int64, open func() (io.ReaderAt, error)) *LazyOpenerAt {
return &LazyOpenerAt{s: filename, open: open, limit: limit}
}
// String implements fmt.Stringer.
func (loa *LazyOpenerAt) String() string {
if len(loa.s) > 0 {
return loa.s
}
if loa.r != nil {
return fmt.Sprintf("%v", loa.r)
}
return "unopened mystery file"
}
// File returns the backend file of the io.ReaderAt if it
// is backed by a os.File.
func (loa *LazyOpenerAt) File() *os.File {
if f, ok := loa.r.(*os.File); ok {
return f
}
return nil
}
// ReadAt implements io.ReaderAt.ReadAt.
func (loa *LazyOpenerAt) ReadAt(p []byte, off int64) (int, error) {
if loa.r == nil && loa.err == nil {
loa.r, loa.err = loa.open()
}
if loa.err != nil {
return 0, loa.err
}
if loa.limit > 0 {
if off >= loa.limit {
return 0, io.EOF
}
if int64(len(p)) > loa.limit-off {
p = p[0 : loa.limit-off]
}
}
return loa.r.ReadAt(p, off)
}
// Close implements io.Closer.Close.
func (loa *LazyOpenerAt) Close() error {
if c, ok := loa.r.(io.Closer); ok {
return c.Close()
}
return nil
}
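
A sketch of lazy opening (the path and the 16-byte limit are hypothetical): constructing the LazyOpenerAt does not touch the filesystem; os.Open only happens on the first ReadAt.

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/u-root/uio/uio"
)

func main() {
	// No file is opened here; open is deferred until the first ReadAt.
	f := uio.NewLazyLimitFile("/tmp/example.bin", 16) // hypothetical path
	defer f.Close()

	buf := make([]byte, 32)
	n, err := f.ReadAt(buf, 0) // at most 16 bytes due to the limit
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])
}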

57
vendor/github.com/u-root/uio/uio/linewriter.go generated vendored Normal file

@@ -0,0 +1,57 @@
// Copyright 2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"bytes"
"io"
)
// LineWriter processes one line of log output at a time.
type LineWriter interface {
// OneLine is always called with exactly one line of output.
OneLine(b []byte)
}
// FullLineWriter returns an io.WriteCloser that buffers written bytes until
// a full line is available, then calls w.OneLine once per complete line.
func FullLineWriter(w LineWriter) io.WriteCloser {
return &fullLineWriter{w: w}
}
type fullLineWriter struct {
w LineWriter
buffer []byte
}
func (fsw *fullLineWriter) printBuf() {
bufs := bytes.Split(fsw.buffer, []byte{'\n'})
for _, buf := range bufs {
if len(buf) != 0 {
fsw.w.OneLine(buf)
}
}
fsw.buffer = nil
}
// Write implements io.Writer and buffers p until at least one full line is
// received.
func (fsw *fullLineWriter) Write(p []byte) (int, error) {
i := bytes.LastIndexByte(p, '\n')
if i == -1 {
fsw.buffer = append(fsw.buffer, p...)
} else {
fsw.buffer = append(fsw.buffer, p[:i]...)
fsw.printBuf()
fsw.buffer = append([]byte{}, p[i:]...)
}
return len(p), nil
}
// Close implements io.Closer and flushes the buffer.
func (fsw *fullLineWriter) Close() error {
fsw.printBuf()
return nil
}
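
A sketch of FullLineWriter with an invented LineWriter implementation that prefixes each complete line; bytes after the last newline are held until the next Write or Close.

package main

import (
	"fmt"

	"github.com/u-root/uio/uio"
)

// prefixer is a hypothetical LineWriter that tags each complete line.
type prefixer struct{}

func (prefixer) OneLine(b []byte) { fmt.Printf("log: %s\n", b) }

func main() {
	w := uio.FullLineWriter(prefixer{})
	defer w.Close() // flushes any trailing partial line

	_, _ = w.Write([]byte("first\nsec")) // prints "log: first"
	_, _ = w.Write([]byte("ond\n"))      // prints "log: second"
}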

37
vendor/github.com/u-root/uio/uio/multiwriter.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Copyright 2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"io"
)
type multiCloser struct {
io.Writer
writers []io.Writer
}
// Close implements io.Closer and closes any io.Writers that are also
// io.Closers.
func (mc *multiCloser) Close() error {
var allErr error
for _, w := range mc.writers {
if c, ok := w.(io.Closer); ok {
if err := c.Close(); err != nil {
allErr = err
}
}
}
return allErr
}
// MultiWriteCloser returns an io.WriteCloser that duplicates its writes to
// all w (like io.MultiWriter) and, on Close, closes each w that implements
// io.Closer.
func MultiWriteCloser(w ...io.Writer) io.WriteCloser {
return &multiCloser{
Writer: io.MultiWriter(w...),
writers: w,
}
}
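
A sketch of MultiWriteCloser mixing closable and non-closable writers; names are illustrative. *bytes.Buffer is not an io.Closer, so Close only affects the file.

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	"github.com/u-root/uio/uio"
)

func main() {
	f, err := os.CreateTemp("", "uio-example")
	if err != nil {
		log.Fatal(err)
	}
	var mem bytes.Buffer

	// Every write is duplicated; Close only closes f, since *bytes.Buffer
	// does not implement io.Closer.
	w := uio.MultiWriteCloser(&mem, f)
	fmt.Fprintln(w, "hello")
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Print(mem.String()) // hello
}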

70
vendor/github.com/u-root/uio/uio/null.go generated vendored Normal file

@@ -0,0 +1,70 @@
// Copyright 2012-2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Discard implementation copied from the Go project:
// https://golang.org/src/io/ioutil/ioutil.go.
// Copyright 2009 The Go Authors. All rights reserved.
package uio
import (
"io"
"sync"
)
type devNull int
// devNull implements ReaderFrom as an optimization so io.Copy to
// Discard can avoid doing unnecessary work.
var _ io.ReaderFrom = devNull(0)
func (devNull) Write(p []byte) (int, error) {
return len(p), nil
}
func (devNull) Name() string {
return "null"
}
func (devNull) WriteString(s string) (int, error) {
return len(s), nil
}
var blackHolePool = sync.Pool{
New: func() interface{} {
b := make([]byte, 8192)
return &b
},
}
func (devNull) ReadFrom(r io.Reader) (n int64, err error) {
bufp := blackHolePool.Get().(*[]byte)
var readSize int
for {
readSize, err = r.Read(*bufp)
n += int64(readSize)
if err != nil {
blackHolePool.Put(bufp)
if err == io.EOF {
return n, nil
}
return
}
}
}
func (devNull) Close() error {
return nil
}
// WriteNameCloser is the interface that groups Write, Close, and Name methods.
type WriteNameCloser interface {
io.Writer
io.Closer
Name() string
}
// Discard is a WriteNameCloser on which all Write and Close calls succeed
// without doing anything, and the Name call returns "null".
var Discard WriteNameCloser = devNull(0)
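
A sketch of Discard as a named, closable sink; because of the ReadFrom fast path, io.Copy drains the source through a pooled scratch buffer without accumulating anything.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/u-root/uio/uio"
)

func main() {
	// io.Copy finds the ReadFrom fast path on the destination.
	n, err := io.Copy(uio.Discard, strings.NewReader("ignored payload"))
	fmt.Println(uio.Discard.Name(), n, err) // null 15 <nil>
}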

42
vendor/github.com/u-root/uio/uio/progress.go generated vendored Normal file

@@ -0,0 +1,42 @@
// Copyright 2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"io"
"strings"
)
// ProgressReadCloser implements io.ReadCloser and prints Symbol to W after
// every Interval bytes that pass through RC.
type ProgressReadCloser struct {
RC io.ReadCloser
Symbol string
Interval int
W io.Writer
counter int
written bool
}
// Read implements io.Reader for ProgressReadCloser.
func (rc *ProgressReadCloser) Read(p []byte) (n int, err error) {
defer func() {
numSymbols := (rc.counter%rc.Interval + n) / rc.Interval
_, _ = rc.W.Write([]byte(strings.Repeat(rc.Symbol, numSymbols)))
rc.counter += n
rc.written = (rc.written || numSymbols > 0)
if err == io.EOF && rc.written {
_, _ = rc.W.Write([]byte("\n"))
}
}()
return rc.RC.Read(p)
}
// Close implements io.Closer for ProgressReadCloser.
func (rc *ProgressReadCloser) Close() error {
return rc.RC.Close()
}
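
A sketch of ProgressReadCloser printing one "#" per KiB; the 4 KiB source and the interval are arbitrary choices.

package main

import (
	"io"
	"os"
	"strings"

	"github.com/u-root/uio/uio"
)

func main() {
	rc := &uio.ProgressReadCloser{
		RC:       io.NopCloser(strings.NewReader(strings.Repeat("x", 4096))),
		Symbol:   "#",
		Interval: 1024, // one symbol per KiB
		W:        os.Stderr,
	}
	defer rc.Close()

	// Prints "####" to stderr as data flows, then a newline at EOF.
	_, _ = io.Copy(io.Discard, rc)
}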

67
vendor/github.com/u-root/uio/uio/reader.go generated vendored Normal file

@@ -0,0 +1,67 @@
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uio
import (
"bytes"
"io"
"math"
"os"
"reflect"
)
type inMemReaderAt interface {
Bytes() []byte
}
// ReadAll reads everything that r contains.
//
// Callers *must* not modify bytes in the returned byte slice.
//
// If r is an in-memory representation, ReadAll will attempt to return a
// pointer to those bytes directly.
func ReadAll(r io.ReaderAt) ([]byte, error) {
if imra, ok := r.(inMemReaderAt); ok {
return imra.Bytes(), nil
}
return io.ReadAll(Reader(r))
}
// Reader generates a Reader from a ReaderAt.
func Reader(r io.ReaderAt) io.Reader {
return io.NewSectionReader(r, 0, math.MaxInt64)
}
// ReaderAtEqual compares the contents of r1 and r2.
func ReaderAtEqual(r1, r2 io.ReaderAt) bool {
var c, d []byte
var r1err, r2err error
if r1 != nil {
c, r1err = ReadAll(r1)
}
if r2 != nil {
d, r2err = ReadAll(r2)
}
return bytes.Equal(c, d) && reflect.DeepEqual(r1err, r2err)
}
// ReadIntoFile reads everything from the io.Reader into the file at the
// given path.
//
// If the file does not exist, it is created. If it exists but is not empty,
// it is truncated.
func ReadIntoFile(r io.Reader, p string) error {
f, err := os.OpenFile(p, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(f, r)
if err != nil {
return err
}
return f.Close()
}
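
A sketch of the ReaderAt helpers: strings.Reader implements io.ReaderAt, so ReadAll adapts it via Reader and drains it; ReadIntoFile then writes the same stream to a hypothetical path.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/u-root/uio/uio"
)

func main() {
	ra := strings.NewReader("contents") // strings.Reader is an io.ReaderAt

	// ReadAll adapts the io.ReaderAt into a stream and drains it.
	b, err := uio.ReadAll(ra)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // contents

	// ReadIntoFile creates (or truncates) the target file; the path is
	// illustrative.
	if err := uio.ReadIntoFile(uio.Reader(ra), "/tmp/uio-example"); err != nil {
		log.Fatal(err)
	}
}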

9
vendor/github.com/u-root/uio/uio/uio.go generated vendored Normal file

@@ -0,0 +1,9 @@
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uio unifies commonly used io utilities for u-root.
//
// uio's most used feature is the Buffer/Lexer combination to parse binary data
// of arbitrary endianness into data structures.
package uio