Update dependencies

bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

vendor/tailscale.com/util/zstdframe/options.go generated vendored Normal file

@@ -0,0 +1,247 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package zstdframe
import (
	"math/bits"
	"strconv"
	"sync"

	"github.com/klauspost/compress/zstd"

	"tailscale.com/util/must"
)
// Option is an option that can be passed to [AppendEncode] or [AppendDecode].
type Option interface{ isOption() }
type encoderLevel int
// Constants that implement [Option] and can be passed to [AppendEncode].
const (
	FastestCompression = encoderLevel(zstd.SpeedFastest)
	DefaultCompression = encoderLevel(zstd.SpeedDefault)
	BetterCompression  = encoderLevel(zstd.SpeedBetterCompression)
	BestCompression    = encoderLevel(zstd.SpeedBestCompression)
)
func (encoderLevel) isOption() {}
// EncoderLevel specifies the compression level when encoding.
//
// This exists for compatibility with [zstd.EncoderLevel] values.
// Most usages should directly use one of the following constants:
// - [FastestCompression]
// - [DefaultCompression]
// - [BetterCompression]
// - [BestCompression]
//
// By default, [DefaultCompression] is chosen.
// This option is ignored when decoding.
func EncoderLevel(level zstd.EncoderLevel) Option { return encoderLevel(level) }
type withChecksum bool
func (withChecksum) isOption() {}
// WithChecksum specifies whether to produce a checksum when encoding,
// or whether to verify the checksum when decoding.
// By default, checksums are produced and verified.
func WithChecksum(check bool) Option { return withChecksum(check) }
type maxDecodedSize uint64
func (maxDecodedSize) isOption() {}
type maxDecodedSizeLog2 uint8 // uint8 avoids allocation when storing into interface
func (maxDecodedSizeLog2) isOption() {}
// MaxDecodedSize specifies the maximum decoded size and
// is used to protect against hostile content.
// By default, there is no limit.
// This option is ignored when encoding.
func MaxDecodedSize(maxSize uint64) Option {
	if bits.OnesCount64(maxSize) == 1 {
		return maxDecodedSizeLog2(log2(maxSize))
	}
	return maxDecodedSize(maxSize)
}
type maxWindowSizeLog2 uint8 // uint8 avoids allocation when storing into interface
func (maxWindowSizeLog2) isOption() {}
// MaxWindowSize specifies the maximum window size, which must be a power-of-two
// and be in the range of [[zstd.MinWindowSize], [zstd.MaxWindowSize]].
//
// The compression or decompression algorithm will use a LZ77 rolling window
// no larger than the specified size. The compression ratio will be
// adversely affected, but memory requirements will be lower.
// When decompressing, an error is reported if a LZ77 back reference exceeds
// the specified maximum window size.
//
// For decompression, [MaxDecodedSize] is generally more useful.
func MaxWindowSize(maxSize uint64) Option {
	switch {
	case maxSize < zstd.MinWindowSize:
		panic("maximum window size cannot be less than " + strconv.FormatUint(zstd.MinWindowSize, 10))
	case bits.OnesCount64(maxSize) != 1:
		panic("maximum window size must be a power-of-two")
	case maxSize > zstd.MaxWindowSize:
		panic("maximum window size cannot be greater than " + strconv.FormatUint(zstd.MaxWindowSize, 10))
	default:
		return maxWindowSizeLog2(log2(maxSize))
	}
}
type lowMemory bool
func (lowMemory) isOption() {}
// LowMemory specifies that the encoder and decoder should aim to use
// lower amounts of memory at the cost of speed.
// By default, more memory is used for better speed.
func LowMemory(low bool) Option { return lowMemory(low) }
var encoderPools sync.Map // map[encoderOptions]*sync.Pool -> *zstd.Encoder
type encoderOptions struct {
	level         zstd.EncoderLevel
	maxWindowLog2 uint8
	checksum      bool
	lowMemory     bool
}
type encoder struct {
	pool *sync.Pool
	*zstd.Encoder
}
func getEncoder(opts ...Option) encoder {
	eopts := encoderOptions{level: zstd.SpeedDefault, checksum: true}
	for _, opt := range opts {
		switch opt := opt.(type) {
		case encoderLevel:
			eopts.level = zstd.EncoderLevel(opt)
		case maxWindowSizeLog2:
			eopts.maxWindowLog2 = uint8(opt)
		case withChecksum:
			eopts.checksum = bool(opt)
		case lowMemory:
			eopts.lowMemory = bool(opt)
		}
	}
	vpool, ok := encoderPools.Load(eopts)
	if !ok {
		vpool, _ = encoderPools.LoadOrStore(eopts, new(sync.Pool))
	}
	pool := vpool.(*sync.Pool)
	enc, _ := pool.Get().(*zstd.Encoder)
	if enc == nil {
		var noopts int
		zopts := [...]zstd.EOption{
			// Set concurrency=1 to ensure synchronous operation.
			zstd.WithEncoderConcurrency(1),
			// In stateless compression, the data is already in a single buffer,
			// so we might as well encode it as a single segment,
			// which ensures that the Frame_Content_Size is always populated,
			// informing decoders up-front the expected decompressed size.
			zstd.WithSingleSegment(true),
			// Ensure strict compliance with RFC 8878, section 3.1.,
			// where zstandard "is made up of one or more frames".
			zstd.WithZeroFrames(true),
			zstd.WithEncoderLevel(eopts.level),
			zstd.WithEncoderCRC(eopts.checksum),
			zstd.WithLowerEncoderMem(eopts.lowMemory),
			nil, // reserved for zstd.WithWindowSize
		}
		if eopts.maxWindowLog2 > 0 {
			zopts[len(zopts)-noopts-1] = zstd.WithWindowSize(1 << eopts.maxWindowLog2)
		} else {
			noopts++
		}
		enc = must.Get(zstd.NewWriter(nil, zopts[:len(zopts)-noopts]...))
	}
	return encoder{pool, enc}
}
func putEncoder(e encoder) { e.pool.Put(e.Encoder) }
var decoderPools sync.Map // map[decoderOptions]*sync.Pool -> *zstd.Decoder
type decoderOptions struct {
	maxSizeLog2   uint8
	maxWindowLog2 uint8
	checksum      bool
	lowMemory     bool
}
type decoder struct {
	pool *sync.Pool
	*zstd.Decoder
	maxSize uint64
}
func getDecoder(opts ...Option) decoder {
	maxSize := uint64(1 << 63)
	dopts := decoderOptions{maxSizeLog2: 63, checksum: true}
	for _, opt := range opts {
		switch opt := opt.(type) {
		case maxDecodedSizeLog2:
			maxSize = 1 << uint8(opt)
			dopts.maxSizeLog2 = uint8(opt)
		case maxDecodedSize:
			maxSize = uint64(opt)
			dopts.maxSizeLog2 = uint8(log2(maxSize))
		case maxWindowSizeLog2:
			dopts.maxWindowLog2 = uint8(opt)
		case withChecksum:
			dopts.checksum = bool(opt)
		case lowMemory:
			dopts.lowMemory = bool(opt)
		}
	}
	vpool, ok := decoderPools.Load(dopts)
	if !ok {
		vpool, _ = decoderPools.LoadOrStore(dopts, new(sync.Pool))
	}
	pool := vpool.(*sync.Pool)
	dec, _ := pool.Get().(*zstd.Decoder)
	if dec == nil {
		var noopts int
		zopts := [...]zstd.DOption{
			// Set concurrency=1 to ensure synchronous operation.
			zstd.WithDecoderConcurrency(1),
			zstd.WithDecoderMaxMemory(1 << min(max(10, dopts.maxSizeLog2), 63)),
			zstd.IgnoreChecksum(!dopts.checksum),
			zstd.WithDecoderLowmem(dopts.lowMemory),
			nil, // reserved for zstd.WithDecoderMaxWindow
		}
		if dopts.maxWindowLog2 > 0 {
			zopts[len(zopts)-noopts-1] = zstd.WithDecoderMaxWindow(1 << dopts.maxWindowLog2)
		} else {
			noopts++
		}
		dec = must.Get(zstd.NewReader(nil, zopts[:len(zopts)-noopts]...))
	}
	return decoder{pool, dec, maxSize}
}
func putDecoder(d decoder) { d.pool.Put(d.Decoder) }
func (d decoder) DecodeAll(src, dst []byte) ([]byte, error) {
	// We only configure DecodeAll to enforce MaxDecodedSize by powers-of-two.
	// Perform a more fine-grained check based on the exact value.
	dst2, err := d.Decoder.DecodeAll(src, dst)
	if err == nil && uint64(len(dst2)-len(dst)) > d.maxSize {
		err = zstd.ErrDecoderSizeExceeded
	}
	return dst2, err
}
// log2 computes log2 of x rounded up to the nearest integer.
func log2(x uint64) int { return 64 - bits.LeadingZeros64(x-1) }
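
For context, the options above are consumed by AppendEncode and AppendDecode, defined in zstd.go below. The following is a minimal usage sketch, not part of the vendored file, assuming only the exported identifiers visible in this diff; the 1<<20 decode limit is an arbitrary example value.

package main

import (
	"fmt"

	"tailscale.com/util/zstdframe"
)

func main() {
	src := []byte("hello, hello, hello, zstd frames")

	// Encode with non-default options; a given option set selects a pooled
	// encoder that was configured once with exactly those options.
	compressed := zstdframe.AppendEncode(nil, src,
		zstdframe.BestCompression,
		zstdframe.WithChecksum(true),
		zstdframe.LowMemory(true))

	// Decode untrusted input with an explicit upper bound on the decoded size
	// (1<<20 here is an arbitrary example limit, not a package default).
	plain, err := zstdframe.AppendDecode(nil, compressed,
		zstdframe.MaxDecodedSize(1<<20))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d bytes, round-trip ok: %t\n",
		len(src), len(compressed), string(plain) == string(src))
}

Passing the same option set on each call lets the package hand back a pooled *zstd.Encoder or *zstd.Decoder keyed by those options instead of constructing a new coder per call.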

vendor/tailscale.com/util/zstdframe/zstd.go generated vendored Normal file

@@ -0,0 +1,127 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package zstdframe provides functionality for encoding and decoding
// independently compressed zstandard frames.
package zstdframe
import (
	"encoding/binary"
	"io"

	"github.com/klauspost/compress/zstd"
)
// The Go zstd API surface is not ergonomic:
//
// - Options are set via NewReader and NewWriter and immutable once set.
//
// - Stateless operations like EncodeAll and DecodeAll are methods on
// the Encoder and Decoder types, which implies that options cannot be
// changed without allocating an entirely new Encoder or Decoder.
//
// This is further strange as Encoder and Decoder types are either
// stateful or stateless objects depending on semantic context.
//
// - By default, the zstd package tries to be overly clever by spawning off
// multiple goroutines to do work, which can lead to both excessive fanout
// of resources and also subtle race conditions. Also, each Encoder/Decoder
// never relinquishes resources, which makes it unsuitable for low-memory use.
// We work around the zstd defaults by setting concurrency=1 on each coder
// and pool individual coders, allowing the Go GC to reclaim unused coders.
//
// See https://github.com/klauspost/compress/issues/264
// See https://github.com/klauspost/compress/issues/479
//
// - The EncodeAll and DecodeAll functions append to a user-provided buffer,
// but use a signature opposite of most append-like functions in Go,
// where the output buffer is the second argument, leading to footguns.
// The zstdframe package provides AppendEncode and AppendDecode functions
// that follow the Go convention of the first argument being the output buffer,
// similar to how the builtin append function operates.
//
// See https://github.com/klauspost/compress/issues/648
//
// - The zstd package is oddly inconsistent about naming. For example,
// IgnoreChecksum vs WithEncoderCRC, or
// WithDecoderLowmem vs WithLowerEncoderMem.
// Most options have a WithDecoder or WithEncoder prefix, but some do not.
//
// The zstdframe package wraps the zstd package and presents a more ergonomic API
// by providing stateless functions that take in variadic options.
// Pooling of resources is handled by this package to avoid each caller
// redundantly performing the same pooling at different call sites.
// TODO: Since compression is CPU bound,
// should we have a semaphore to ensure at most one operation per CPU?
// AppendEncode appends the zstandard encoded content of src to dst.
// It emits exactly one frame as a single segment.
func AppendEncode(dst, src []byte, opts ...Option) []byte {
	enc := getEncoder(opts...)
	defer putEncoder(enc)
	return enc.EncodeAll(src, dst)
}
// AppendDecode appends the zstandard decoded content of src to dst.
// The input may consist of zero or more frames.
// Any call that handles untrusted input should specify [MaxDecodedSize].
func AppendDecode(dst, src []byte, opts ...Option) ([]byte, error) {
	dec := getDecoder(opts...)
	defer putDecoder(dec)
	return dec.DecodeAll(src, dst)
}
// NextSize parses the next frame (regardless of whether it is a
// data frame or a metadata frame) and returns the total size of the frame.
// The frame can be skipped by slicing n bytes from b (e.g., b[n:]).
// It reports [io.ErrUnexpectedEOF] if the frame is incomplete.
func NextSize(b []byte) (n int, err error) {
	// Parse the frame header (RFC 8878, section 3.1.1.).
	var frame zstd.Header
	if err := frame.Decode(b); err != nil {
		return n, err
	}
	n += frame.HeaderSize
	if frame.Skippable {
		// Handle skippable frame (RFC 8878, section 3.1.2.).
		if len(b[n:]) < int(frame.SkippableSize) {
			return n, io.ErrUnexpectedEOF
		}
		n += int(frame.SkippableSize)
	} else {
		// Handle one or more Data_Blocks (RFC 8878, section 3.1.1.2.).
		for {
			if len(b[n:]) < 3 {
				return n, io.ErrUnexpectedEOF
			}
			blockHeader := binary.LittleEndian.Uint32(b[n-1:]) >> 8 // load uint24
			lastBlock := (blockHeader >> 0) & ((1 << 1) - 1)
			blockType := (blockHeader >> 1) & ((1 << 2) - 1)
			blockSize := (blockHeader >> 3) & ((1 << 21) - 1)
			n += 3
			if blockType == 1 {
				// For RLE_Block (RFC 8878, section 3.1.1.2.2.),
				// the Block_Content is only a single byte.
				blockSize = 1
			}
			if len(b[n:]) < int(blockSize) {
				return n, io.ErrUnexpectedEOF
			}
			n += int(blockSize)
			if lastBlock != 0 {
				break
			}
		}
		// Handle optional Content_Checksum (RFC 8878, section 3.1.1.).
		if frame.HasCheckSum {
			if len(b[n:]) < 4 {
				return n, io.ErrUnexpectedEOF
			}
			n += 4
		}
	}
	return n, nil
}
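
The sketch below, also not part of the vendored file, shows NextSize used to walk a buffer of concatenated frames and decode each frame on its own; the 1<<16 bound is again an arbitrary example value.

package main

import (
	"fmt"

	"tailscale.com/util/zstdframe"
)

func main() {
	// Two independently compressed frames concatenated into one buffer;
	// AppendEncode emits exactly one frame per call.
	var buf []byte
	buf = zstdframe.AppendEncode(buf, []byte("first frame"))
	buf = zstdframe.AppendEncode(buf, []byte("second frame"))

	// Walk the buffer frame by frame with NextSize and decode each frame
	// separately, bounding the decoded size of untrusted input.
	for len(buf) > 0 {
		n, err := zstdframe.NextSize(buf)
		if err != nil {
			panic(err) // io.ErrUnexpectedEOF on a truncated frame
		}
		out, err := zstdframe.AppendDecode(nil, buf[:n], zstdframe.MaxDecodedSize(1<<16))
		if err != nil {
			panic(err)
		}
		fmt.Printf("frame of %d bytes -> %q\n", n, out)
		buf = buf[n:]
	}
}

Because AppendEncode emits exactly one frame as a single segment, each NextSize step in the loop corresponds to one original input.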